1 /*
2  * Copyright (C) 2017 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 /**
18  * @addtogroup NeuralNetworks
19  * @{
20  */
21 
22 /**
23  * @file NeuralNetworksTypes.h
24  */
25 
26 #ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_TYPES_H
27 #define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_TYPES_H
28 
29 /******************************************************************
30  *
31  * IMPORTANT NOTICE:
32  *
33  *   This file is part of Android's set of stable system headers
34  *   exposed by the Android NDK (Native Development Kit).
35  *
36  *   Third-party source AND binary code relies on the definitions
37  *   here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES.
38  *
39  *   - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES)
40  *   - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS
41  *   - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY
42  *   - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES
43  */
44 
45 #include <android/hardware_buffer.h>
46 #include <stdbool.h>
47 #include <stddef.h>
48 #include <stdint.h>
49 #include <sys/cdefs.h>
50 
51 __BEGIN_DECLS
52 
/**
 * Operand types.
 *
 * The type of an operand in a model.
 *
 * Types prefaced with ANEURALNETWORKS_TENSOR_* must be used for tensor data (i.e., tensors
 * with at least one dimension). Types not prefaced by ANEURALNETWORKS_TENSOR_* represent
 * scalar values and must have no dimensions.
 *
 * Although we define many types, most operators accept just a few
 * types. Most used are {@link ANEURALNETWORKS_TENSOR_FLOAT32},
 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
 * and {@link ANEURALNETWORKS_INT32}.
 *
 * NOTE: This enum is part of the NDK stable ABI (see the notice at the top of
 * this file). Existing values must never be renamed, renumbered, or removed;
 * only new 32-bit values may be appended.
 *
 * Available since NNAPI feature level 1.
 */
typedef enum {
    /** A 32 bit floating point scalar value. */
    ANEURALNETWORKS_FLOAT32 = 0,
    /** A signed 32 bit integer scalar value. */
    ANEURALNETWORKS_INT32 = 1,
    /** An unsigned 32 bit integer scalar value. */
    ANEURALNETWORKS_UINT32 = 2,
    /** A tensor of 32 bit floating point values. */
    ANEURALNETWORKS_TENSOR_FLOAT32 = 3,
    /** A tensor of 32 bit integer values. */
    ANEURALNETWORKS_TENSOR_INT32 = 4,
    /**
     * A tensor of 8 bit unsigned integers that represent real numbers.
     *
     * Attached to this tensor are two numbers that can be used to convert the
     * 8 bit integer to the real value and vice versa. These two numbers are:
     * - scale: a 32 bit floating point value greater than zero.
     * - zeroPoint: a 32 bit integer, in range [0, 255].
     *
     * The formula is:
     *   real_value = (integer_value - zeroPoint) * scale.
     */
    ANEURALNETWORKS_TENSOR_QUANT8_ASYMM = 5,
    /**
     * An 8 bit boolean scalar value.
     *
     * Values of this operand type are either true or false. A zero value
     * represents false; any other value represents true.
     *
     * Available since NNAPI feature level 3.
     */
    ANEURALNETWORKS_BOOL = 6,
    /**
     * A tensor of 16 bit signed integers that represent real numbers.
     *
     * Attached to this tensor is a number representing real value scale that is
     * used to convert the 16 bit number to a real value in the following way:
     * realValue = integerValue * scale.
     *
     * scale is a 32 bit floating point with value greater than zero.
     *
     * Available since NNAPI feature level 3.
     */
    ANEURALNETWORKS_TENSOR_QUANT16_SYMM = 7,
    /**
     * A tensor of IEEE 754 16 bit floating point values.
     *
     * Available since NNAPI feature level 3.
     */
    ANEURALNETWORKS_TENSOR_FLOAT16 = 8,
    /**
     * A tensor of 8 bit boolean values.
     *
     * Values of this operand type are either true or false. A zero value
     * represents false; any other value represents true.
     *
     * Available since NNAPI feature level 3.
     */
    ANEURALNETWORKS_TENSOR_BOOL8 = 9,
    /**
     * An IEEE 754 16 bit floating point scalar value.
     *
     * Available since NNAPI feature level 3.
     */
    ANEURALNETWORKS_FLOAT16 = 10,
    /**
     * A tensor of 8 bit signed integers that represent real numbers.
     *
     * This tensor is associated with additional fields that can
     * be used to convert the 8 bit signed integer to the real value and vice versa.
     * These fields are:
     * - channelDim: a 32 bit unsigned integer indicating channel dimension.
     * - scales: an array of positive 32 bit floating point values.
     * The size of the scales array must be equal to dimensions[channelDim].
     *
     * {@link ANeuralNetworksModel_setOperandSymmPerChannelQuantParams} must be used
     * to set the parameters for an Operand of this type.
     *
     * The channel dimension of this tensor must not be unknown (dimensions[channelDim] != 0).
     *
     * The formula is:
     * realValue[..., C, ...] =
     *     integerValue[..., C, ...] * scales[C]
     * where C is an index in the Channel dimension.
     *
     * Available since NNAPI feature level 3.
     */
    ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL = 11,
    /**
     * A tensor of 16 bit unsigned integers that represent real numbers.
     *
     * Attached to this tensor are two numbers that can be used to convert the
     * 16 bit integer to the real value and vice versa. These two numbers are:
     * - scale: a 32 bit floating point value greater than zero.
     * - zeroPoint: a 32 bit integer, in range [0, 65535].
     *
     * The formula is:
     * real_value = (integer_value - zeroPoint) * scale.
     *
     * Available since NNAPI feature level 3.
     */
    ANEURALNETWORKS_TENSOR_QUANT16_ASYMM = 12,
    /**
     * A tensor of 8 bit signed integers that represent real numbers.
     *
     * Attached to this tensor is a number representing real value scale that is
     * used to convert the 8 bit number to a real value in the following way:
     * realValue = integerValue * scale.
     *
     * scale is a 32 bit floating point with value greater than zero.
     *
     * Available since NNAPI feature level 3.
     */
    ANEURALNETWORKS_TENSOR_QUANT8_SYMM = 13,
    /**
     * A tensor of 8 bit signed integers that represent real numbers.
     *
     * Attached to this tensor are two numbers that can be used to convert the
     * 8 bit integer to the real value and vice versa. These two numbers are:
     * - scale: a 32 bit floating point value greater than zero.
     * - zeroPoint: a 32 bit integer, in range [-128, 127].
     *
     * The formula is:
     * real_value = (integer_value - zeroPoint) * scale.
     *
     * Available since NNAPI feature level 4.
     */
    ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED = 14,

    /**
     * A reference to a model.
     *
     * {@link ANeuralNetworksModel_setOperandValueFromModel} must be used to set
     * the value for an Operand of this type.
     *
     * Available since NNAPI feature level 4.
     */
    ANEURALNETWORKS_MODEL = 15,
} OperandCode;
208 
209 /**
210  * Operation types.
211  *
212  * The type of an operation in a model.
213  *
214  * Available since NNAPI feature level 1.
215  */
216 typedef enum {
217     // Operations below are available since NNAPI feature level 1.
218 
219     /**
220      * Adds two tensors, element-wise.
221      *
222      * Takes two input tensors of identical {@link OperandCode} and compatible
223      * dimensions. The output is the sum of both input tensors, optionally
224      * modified by an activation function.
225      *
226      * Two dimensions are compatible when:
227      *     1. they are equal, or
228      *     2. one of them is 1
229      *
230      * The size of the output is the maximum size along each dimension of the
231      * input operands. It starts with the trailing dimensions, and works its
232      * way forward.
233      *
234      * Example:
235      *
236      *     input1.dimension = {4, 1, 2}
237      *     input2.dimension = {5, 4, 3, 1}
238      *     output.dimension = {5, 4, 3, 2}
239      *
240      * Since NNAPI feature level 3, generic zero-sized input tensor is supported. Zero
241      * dimension is only compatible with 0 or 1. The size of the output
242      * dimension is zero if either of corresponding input dimension is zero.
243      *
244      * Supported tensor {@link OperandCode}:
245      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
246      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
247      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
248      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
249      * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4)
250      *
251      * Supported tensor rank: up to 4
252      *
253      * Inputs:
254      * * 0: A tensor.
255      * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
256      *      as input0.
257      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
258      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
259      *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
260      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
261      *      {@link FuseCode} values. Specifies the activation to
262      *      invoke on the result.
263      *      For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,
264      *      the {@link FuseCode} must be "NONE".
265      *
266      * Outputs:
267      * * 0: The sum, a tensor of the same {@link OperandCode} as input0.
268      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
269      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
270      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
271      *
272      * Available since NNAPI feature level 1.
273      */
274     ANEURALNETWORKS_ADD = 0,
275 
276     /**
277      * Performs a 2-D average pooling operation.
278      *
279      * The output dimensions are functions of the filter dimensions, stride, and
280      * padding.
281      *
282      * The values in the output tensor are computed as:
283      *
284      *     output[b, i, j, channel] =
285      *         sum_{di, dj}(
286      *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
287      *         ) / sum(1)
288      *
289      * Supported tensor {@link OperandCode}:
290      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
291      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
292      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
293      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
294      *
295      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
296      * With the default data layout NHWC, the data is stored in the order of:
297      * [batch, height, width, channels]. Alternatively, the data layout could
298      * be NCHW, the data storage order of: [batch, channels, height, width].
299      * NCHW is supported since NNAPI feature level 3.
300      *
301      * Both explicit padding and implicit padding are supported.
302      *
303      * Inputs (explicit padding):
304      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
305      *      the input.
306      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
307      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
308      *      the left, in the ‘width’ dimension.
309      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
310      *      the right, in the ‘width’ dimension.
311      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
312      *      the top, in the ‘height’ dimension.
313      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
314      *      the bottom, in the ‘height’ dimension.
315      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
316      *      walking through input in the ‘width’ dimension.
317      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
318      *      walking through input in the ‘height’ dimension.
319      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
320      *      width.
321      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
322      *      height.
323      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
324      *      {@link FuseCode} values. Specifies the activation to
325      *      invoke on the result.
326      * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
327      *       Set to true to specify NCHW data layout for input0 and output0.
328      *       Available since NNAPI feature level 3.
329      *
330      * Inputs (implicit padding):
331      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
332      *      the input.
333      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
334      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
335      *      padding scheme, has to be one of the
336      *      {@link PaddingCode} values.
337      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
338      *      walking through input in the ‘width’ dimension.
339      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
340      *      walking through input in the ‘height’ dimension.
341      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
342      *      width.
343      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
344      *      height.
345      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
346      *      {@link FuseCode} values. Specifies the activation to
347      *      invoke on the result.
348      * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
349      *      Set to true to specify NCHW data layout for input0 and output0.
350      *      Available since NNAPI feature level 3.
351      *
352      * Outputs:
353      * * 0: The output 4-D tensor, of shape
354      *      [batches, out_height, out_width, depth].
355      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
356      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
357      *      the scale and zeroPoint must be the same as input0.
358      *
359      * Available since NNAPI feature level 1.
360      */
361     ANEURALNETWORKS_AVERAGE_POOL_2D = 1,
362 
363     /**
364      * Concatenates the input tensors along the given dimension.
365      *
366      * The input tensors must have identical {@link OperandCode} and the same
367      * dimensions except the dimension along the concatenation axis.
368      *
369      * Supported tensor {@link OperandCode}:
370      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
371      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
372      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
373      *   (full support since NNAPI feature level 3, see the input section)
374      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
375      *
376      * Supported tensor rank: up to 4
377      *
378      * Inputs:
379      * * 0 ~ n-1: The list of n input tensors, of shape
380      *            [D0, D1, ..., Daxis(i), ..., Dm].
381      *            Before NNAPI feature level 3, all input tensors of
382      *            {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
383      *            must have the same scale and zeroPoint as the output tensor.
384      *            Input tensors of
385      *            {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
386      *            are allowed to have different scale and zeroPoint.
387      *            Since NNAPI feature level 3, zero-sized tensors are supported.
388      * * n: An {@link ANEURALNETWORKS_INT32} scalar, specifying the
389      *      concatenation axis.
390      *
391      * Outputs:
392      * * 0: The output, a tensor of the same {@link OperandCode} as the input
393      *      tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
394      *      Since NNAPI feature level 3, for a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
395      *      the scale and zeroPoint values can be different from
396      *      input tensors. Before NNAPI feature level 3 they have to be the same as for the
397      *      input tensors.
398      *
399      * Available since NNAPI feature level 1.
400      */
401     ANEURALNETWORKS_CONCATENATION = 2,
402 
403     /**
404      * Performs a 2-D convolution operation.
405      *
406      * The CONV_2D op sweeps a 2-D filter that can mix channels together over a
407      * batch of images, applying the filter to each window of each image of the
408      * appropriate size.
409      *
410      * The output dimensions are functions of the filter dimensions, stride, and
411      * padding.
412      *
413      * The values in the output tensor are computed as:
414      *
415      *     output[b, i, j, channel] =
416      *         sum_{di, dj, k} (
417      *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
418      *             filter[channel, di, dj, k]
419      *         ) + bias[channel]
420      *
421      * Supported tensor {@link OperandCode} configurations:
422      * * 32 bit floating point:
423      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
424      *
425      * * Quantized:
426      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
427      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
428      * * * input.scale * filter.scale).
429      *
430      * Available since NNAPI feature level 3:
431      * * 16 bit floating point:
432      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
433      *
434      * * Quantized with symmetric per channel quantization for the filter:
435      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
436      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
437      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
438      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
439      *
440      * Available since NNAPI feature level 4:
441      * * Quantized signed (since NNAPI feature level 4):
442      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
443      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
444      * * * input.scale * filter.scale).
445      *
446      * * Quantized signed with filter symmetric per channel quantization
447      *   (since NNAPI feature level 4):
448      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
449      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
450      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
451      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
452      *
453      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
454      * With the default data layout NHWC, the data is stored in the order of:
455      * [batch, height, width, channels]. Alternatively, the data layout could
456      * be NCHW, the data storage order of: [batch, channels, height, width].
457      * NCHW is supported since NNAPI feature level 3.
458      *
459      * Both explicit padding and implicit padding are supported.
460      *
461      * Inputs (explicit padding):
462      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
463      *      specifying the input.
464      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
465      * * 1: A 4-D tensor, of shape
466      *      [depth_out, filter_height, filter_width, depth_in], specifying the
467      *      filter.
468      *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
469      *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
470      *      must be set to 0.
471      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
472      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
473      *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type.
474      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
475      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
476      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
477      *      of 0 and bias_scale == input_scale * filter_scale.
478      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
479      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
480      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
481      *      bias_scale[i] = input_scale * filter_scale[i].
482      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
483      *      the left, in the ‘width’ dimension.
484      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
485      *      the right, in the ‘width’ dimension.
486      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
487      *      the top, in the ‘height’ dimension.
488      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
489      *      the bottom, in the ‘height’ dimension.
490      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
491      *      walking through input in the ‘width’ dimension.
492      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
493      *      walking through input in the ‘height’ dimension.
494      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
495      *      {@link FuseCode} values. Specifies the activation to
496      *      invoke on the result.
497      * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
498      *      Set to true to specify NCHW data layout for input0 and output0.
499      *      Available since NNAPI feature level 3.
500      * * 11: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
501      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
502      *      cells between each filter element on width dimension. If this input is set,
503      *      input 12 (dilation factor for height) must be specified as well.
504      *      Available since NNAPI feature level 3.
505      * * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
506      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
507      *      cells between each filter element on height dimension. If this input is set,
508      *      input 11 (dilation factor for width) must be specified as well.
509      *      Available since NNAPI feature level 3.
510      *
511      * Inputs (implicit padding):
512      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
513      *      specifying the input.
514      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
515      * * 1: A 4-D tensor, of shape
516      *      [depth_out, filter_height, filter_width, depth_in], specifying the
517      *      filter.
518      *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
519      *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
520      *      must be set to 0.
521      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
522      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
523      *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same
524      *      type.
525      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
526      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
527      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
528      *      of 0 and bias_scale == input_scale * filter_scale.
529      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
530      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
531      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
532      *      bias_scale[i] = input_scale * filter_scale[i].
533      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
534      *      padding scheme, has to be one of the
535      *      {@link PaddingCode} values.
536      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
537      *      walking through input in the ‘width’ dimension.
538      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
539      *      walking through input in the ‘height’ dimension.
540      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
541      *      {@link FuseCode} values. Specifies the activation to
542      *      invoke on the result.
543      * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
544      *      Set to true to specify NCHW data layout for input0 and output0.
545      *      Available since NNAPI feature level 3.
546      * * 8: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
547      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
548      *      cells between each filter element on width dimension. If this input is set,
549      *      input 9 (dilation factor for height) must be specified as well.
550      *      Available since NNAPI feature level 3.
551      * * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
552      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
553      *      cells between each filter element on height dimension. If this input is set,
554      *      input 8 (dilation factor for width) must be specified as well.
555      *      Available since NNAPI feature level 3.
556      *
557      * Outputs:
558      * * 0: The output 4-D tensor, of shape
559      *      [batches, out_height, out_width, depth_out].
560      *      Before NNAPI feature level 3, for output tensor of
561      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following condition must
562      *      be satisfied: output_scale > input_scale * filter_scale
563      *
564      * Available since NNAPI feature level 1.
565      */
566     ANEURALNETWORKS_CONV_2D = 3,
567 
568     /**
569      * Performs a depthwise 2-D convolution operation.
570      *
571      * Given an input tensor of shape [batches, height, width, depth_in] and a
572      * filter tensor of shape [1, filter_height, filter_width, depth_out]
573      * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV
574      * applies a different filter to each input channel (expanding from 1
575      * channel to channel_multiplier channels for each), then concatenates the
576      * results together.
577      *
578      * The output has depth_out = depth_in * depth_multiplier channels.
579      * The output dimensions are functions of the filter dimensions, stride, and
580      * padding.
581      *
582      * The values in the output tensor are computed as:
583      *
584      *     output[b, i, j, k * channel_multiplier + q] =
585      *         sum_{di, dj} (
586      *             input[b, strides[1] * i + di, strides[2] * j + dj, k] *
587      *             filter[1, di, dj, k * channel_multiplier + q]
588      *         ) + bias[k * channel_multiplier + q]
589      *
590      * Supported tensor {@link OperandCode} configurations:
591      * * 32 bit floating point:
592      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
593      *
594      * * Quantized:
595      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
596      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
597      * * * input.scale * filter.scale).
598      *
599      * Available since NNAPI feature level 3:
600      * * 16 bit floating point:
601      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
602      *
603      * * Quantized with symmetric per channel quantization for the filter:
604      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
605      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
606      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
607      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
608      *
609      * Available since NNAPI feature level 4:
610      * * Quantized signed (since NNAPI feature level 4):
611      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
612      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
613      * * * input.scale * filter.scale).
614      *
615      * * Quantized signed with filter symmetric per channel quantization
616      *   (since NNAPI feature level 4):
617      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
618      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
619      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
620      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
621      *
622      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
623      * With the default data layout NHWC, the data is stored in the order of:
624      * [batch, height, width, channels]. Alternatively, the data layout could
625      * be NCHW, the data storage order of: [batch, channels, height, width].
626      * NCHW is supported since NNAPI feature level 3.
627      *
628      * Both explicit padding and implicit padding are supported.
629      *
630      * Inputs (explicit padding):
631      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
632      *      specifying the input.
633      * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
634      *      specifying the filter.
635      *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
636      *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
637      *      must be set to 3.
638      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
639      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
640      *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type.
641      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
642      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
643      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
644      *      of 0 and bias_scale == input_scale * filter_scale.
645      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
646      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
647      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
648      *      bias_scale[i] = input_scale * filter_scale[i].
649      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
650      *      the left, in the ‘width’ dimension.
651      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
652      *      the right, in the ‘width’ dimension.
653      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
654      *      the top, in the ‘height’ dimension.
655      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
656      *      the bottom, in the ‘height’ dimension.
657      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
658      *      walking through input in the ‘width’ dimension.
659      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
660      *      walking through input in the ‘height’ dimension.
661      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise
662      *      multiplier.
663      * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
664      *       {@link FuseCode} values. Specifies the activation to
665      *       invoke on the result.
666      * * 11: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
667      *       Set to true to specify NCHW data layout for input0 and output0.
668      *       Available since NNAPI feature level 3.
669      * * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
670      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
671      *      cells between each filter element on width dimension. If this input is set,
672      *      input 13 (dilation factor for height) must be specified as well.
673      *      Available since NNAPI feature level 3.
674      * * 13: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
675      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
676      *      cells between each filter element on height dimension. If this input is set,
677      *      input 12 (dilation factor for width) must be specified as well.
678      *      Available since NNAPI feature level 3.
679      *
680      * Inputs (implicit padding):
681      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
682      *      specifying the input.
683      * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
684      *      specifying the filter.
685      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
686      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32}
687      *      or {@link ANEURALNETWORKS_TENSOR_FLOAT16} the bias must be of the same type.
688      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
689      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
690      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
691      *      of 0 and bias_scale == input_scale * filter_scale.
692      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
693      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
694      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
695      *      bias_scale[i] = input_scale * filter_scale[i].
696      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
697      *      padding scheme, has to be one of the
698      *      {@link PaddingCode} values.
699      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
700      *      walking through input in the ‘width’ dimension.
701      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
702      *      walking through input in the ‘height’ dimension.
703      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise
704      *      multiplier.
705      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
706      *      {@link FuseCode} values. Specifies the activation to
707      *      invoke on the result.
708      * * 8: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
709      *      Set to true to specify NCHW data layout for input0 and output0.
710      *      Available since NNAPI feature level 3.
711      * * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
712      *      factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
713      *      cells between each filter element on width dimension. If this input is set,
714      *      input 10 (dilation factor for height) must be specified as well.
715      *      Available since NNAPI feature level 3.
716      * * 10: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation
717      *      factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
718      *      cells between each filter element on height dimension. If this input is set,
719      *      input 9 (dilation factor for width) must be specified as well.
720      *      Available since NNAPI feature level 3.
721      *
722      * Outputs:
723      * * 0: The output 4-D tensor, of shape
724      *      [batches, out_height, out_width, depth_out]. Before NNAPI feature level 3, for
725      *      output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
726      *      the following condition must be satisfied:
727      *      output_scale > input_scale * filter_scale
728      *
729      * Available since NNAPI feature level 1.
730      */
731     ANEURALNETWORKS_DEPTHWISE_CONV_2D = 4,
732 
733     /**
734      * Rearranges data from depth into blocks of spatial data.
735      *
736      * More specifically, this op outputs a copy of the input tensor where
737      * values from the depth dimension are moved in spatial blocks to the height
738      * and width dimensions. The value block_size indicates the input block size
739      * and how the data is moved.
740      *
741      * Chunks of data of size block_size * block_size from depth are rearranged
742      * into non-overlapping blocks of size block_size x block_size.
743      *
744      * The width of the output tensor is input_width * block_size, whereas the
745      * height is input_height * block_size. The depth of the input tensor must
746      * be divisible by block_size * block_size.
747      *
748      * Supported tensor {@link OperandCode}:
749      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
750      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
751      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
752      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
753      *
754      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
755      * With the default data layout NHWC, the data is stored in the order of:
756      * [batch, height, width, channels]. Alternatively, the data layout could
757      * be NCHW, the data storage order of: [batch, channels, height, width].
758      * NCHW is supported since NNAPI feature level 3.
759      *
760      * Inputs:
761      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
762      *      specifying the input.
763      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size.
764      *      block_size must be >=1 and block_size * block_size must be a divisor
765      *      of the input depth.
766      * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
767      *      Set to true to specify NCHW data layout for input0 and output0.
768      *      Available since NNAPI feature level 3.
769      *
770      * Outputs:
771      * * 0: The output 4-D tensor, of shape [batch, height*block_size,
772      *      width*block_size, depth/(block_size*block_size)].
773      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
774      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
775      *      the scale and zeroPoint must be the same as input0.
776      *
777      * Available since NNAPI feature level 1.
778      */
779     ANEURALNETWORKS_DEPTH_TO_SPACE = 5,
780 
781     /**
782      * Dequantizes the input tensor.
783      *
784      * The formula is:
785      *
786      *     output = (input - zeroPoint) * scale.
787      *
788      * Supported input tensor {@link OperandCode}:
789      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
790      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} (since NNAPI feature level 3)
791      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} (since NNAPI feature level 3)
792      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
793      *
794      * Supported output tensor {@link OperandCode}:
795      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
796      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
797      *
798      * Supported tensor rank: up to 4
799      *
800      * Inputs:
801      * * 0: A tensor.
802      *      Since NNAPI feature level 3, this tensor may be zero-sized.
803      *
804      * Outputs:
805      * * 0: A tensor with the same shape as input0.
806      *
807      * Available since NNAPI feature level 1.
808      */
809     ANEURALNETWORKS_DEQUANTIZE = 6,
810 
811     /**
812      * Looks up sub-tensors in the input tensor.
813      *
814      * This operator takes for input a tensor of values (Values) and
815      * a one-dimensional tensor of selection indices (Lookups).
816      * The output tensor is the concatenation of sub-tensors of Values as
817      * selected by Lookups.
818      *
819      * Think of Values as being sliced along its first dimension:
820      * The entries in Lookups select which slices are concatenated together
821      * to create the output tensor.
822      *
823      * For example, if Values has shape of [40, 200, 300] and
824      * Lookups has shape of [3], all three values found in Lookups are
825      * expected to be between 0 and 39. The resulting tensor must
826      * have shape of [3, 200, 300].
827      *
828      * If a value in Lookups is out of bounds, the operation must fail
829      * and an error must be reported.
830      *
831      * Supported value tensor {@link OperandCode}:
832      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 4)
833      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
834      * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 3)
835      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3)
836      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
837      *
838      * Supported value tensor rank: from 2
839      *
840      * Inputs:
841      * * 0: Lookups. A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}.
842      *      The values are indices into the first dimension of Values.
843      * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are
844      *      extracted.
845      *
846      * Output:
847      * * 0: A n-D tensor with the same rank and shape as the Values
848      *      tensor, except for the first dimension which has the same size
849      *      as Lookups' only dimension.
850      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
851      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
852      *      the scale and zeroPoint must be the same as input1.
853      *
854      * Available since NNAPI feature level 1.
855      */
856     ANEURALNETWORKS_EMBEDDING_LOOKUP = 7,
857 
858     /**
859      * Computes element-wise floor() on the input tensor.
860      *
861      * Supported tensor {@link OperandCode}:
862      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
863      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
864      *
865      * Supported tensor rank: up to 4
866      *
867      * Inputs:
868      * * 0: A tensor.
869      *
870      * Outputs:
871      * * 0: The output tensor, of the same {@link OperandCode} and dimensions as
872      *      the input tensor.
873      *
874      * Available since NNAPI feature level 1.
875      */
876     ANEURALNETWORKS_FLOOR = 8,
877 
878     /**
879      * Denotes a fully (densely) connected layer, which connects all elements
880      * in the input tensor with each element in the output tensor.
881      *
882      * This layer implements the operation:
883      *
884      *     outputs = activation(inputs * weights’ + bias)
885      *
886      * Supported tensor {@link OperandCode}:
887      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
888      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
889      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
890      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
891      *
892      * Supported tensor rank: up to 4.
893      *
894      * Inputs:
895      * * 0: A tensor of at least rank 2, specifying the input. If rank is
896      *      greater than 2, then it gets flattened to a 2-D Tensor. The
897      *      (flattened) 2-D Tensor is reshaped (if necessary) to
898      *      [batch_size, input_size], where "input_size" corresponds to the
899      *      number of inputs to the layer, matching the second dimension of
900      *      weights, and "batch_size" is calculated by dividing the number of
901      *      elements by "input_size".
902      *      Since NNAPI feature level 3, zero batch_size is supported for this tensor.
903      * * 1: A 2-D tensor, specifying the weights, of shape
904      *      [num_units, input_size], where "num_units" corresponds to the number
905      *      of output nodes.
906      * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
907      *      tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should
908      *      also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
909      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
910      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
911      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32},
912      *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
913      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
914      *      {@link FuseCode} values. Specifies the activation to
915      *      invoke on the result.
916      *
917      * Outputs:
918      * * 0: The output tensor, of shape [batch_size, num_units]. Before NNAPI feature level 3, for
919      *      output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following
920      *      condition must be satisfied: output_scale > input_scale * filter_scale.
921      *
922      * Available since NNAPI feature level 1.
923      */
924     ANEURALNETWORKS_FULLY_CONNECTED = 9,
925 
926     /**
927      * Looks up sub-tensors in the input tensor using a key-value map.
928      *
929      * This operator takes for input a tensor of values (Values),
930      * a one-dimensional tensor of selection values (Lookups) and
931      * a one-dimensional tensor that maps these values to Values
932      * indexes. The output tensor is the concatenation of sub-tensors of
933      * Values as selected by Lookups via Keys.
934      *
935      * Think of Values as being sliced along its outer-most dimension.
936      * The output is a concatenation of selected slices, with one slice
937      * for each entry of Lookups. The slice selected is the one at the
938      * same index as the Maps entry that matches the value in Lookups.
939      *
940      * For a hit, the corresponding sub-tensor of Values is included
941      * in the Output tensor. For a miss, the corresponding sub-tensor in
942      * Output must have zero values.
943      *
944      * For example, if Values has shape of [40, 200, 300],
945      * Keys should have a shape of [40]. If Lookups tensor has shape
946      * of [3], three slices are being concatenated, so the resulting tensor
947      * must have the shape of [3, 200, 300]. If the first entry in Lookups
948      * has the value 123456, that value must be located in Keys tensor.
949      * If the sixth entry of Keys contains 123456, the sixth slice of Values
950      * must be selected. If no entry in Keys has 123456, a slice of zeroes
951      * must be concatenated.
952      *
953      * Supported value tensor {@link OperandCode}:
954      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
955      * * {@link ANEURALNETWORKS_TENSOR_INT32}
956      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
957      *
958      * Supported value tensor rank: from 2
959      *
960      * Inputs:
961      * * 0: Lookups. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with
962      *      shape [ k ].
963      * * 1: Keys. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape
964      *      [ n ]; Keys and Values pair represent a map, i.e., the ith element
965      *      in Keys (Keys[i]) is the key to select the ith sub-tensor in Values
966      *      (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in
967      *      ascending order.
968      * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension
969      *      must be n.
970      *
971      * Outputs:
972      * * 0: Output. A tensor with shape [ k …].
973      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
974      *      the scale and zeroPoint must be the same as input2.
975      * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup
976      *      hits (True) or not (False).
977      *      Stored as {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} with offset 0
978      *      and scale 1.0f.
979      *      A non-zero byte represents True, a hit. A zero indicates otherwise.
980      *
981      * Available since NNAPI feature level 1.
982      */
983     ANEURALNETWORKS_HASHTABLE_LOOKUP = 10,
984 
985     /**
986      * Applies L2 normalization along the axis dimension.
987      *
988      * The values in the output tensor are computed as:
989      *
990      *     output[batch, row, col, channel] =
991      *         input[batch, row, col, channel] /
992      *         sqrt(sum_{c} pow(input[batch, row, col, c], 2))
993      *
994      * By default the axis dimension is the last dimension of the input tensor.
995      *
996      * Supported tensor {@link OperandCode}:
997      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
998      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
999      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3)
1000      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1001      *
1002      * Supported tensor rank: up to 4
1003      * Tensors with rank less than 4 are only supported since NNAPI feature level 3.
1004      *
1005      * Inputs:
1006      * * 0: An n-D tensor, specifying the tensor to be normalized.
1007      * * 1: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1,
1008      *      specifying the dimension normalization would be performed on.
1009      *      Negative index is used to specify axis from the end (e.g. -1 for
1010      *      the last axis). Must be in the range [-n, n).
1011      *      Available since NNAPI feature level 3.
1012      *
1013      * Outputs:
1014      * * 0: A tensor of the same {@link OperandCode} and same shape as input0.
1015      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
1016      *      the scale must be 1.f / 128 and the zeroPoint must be 128.
1017      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
1018      *      the scale must be 1.f / 128 and the zeroPoint must be 0.
1019      *
1020      *      NOTE: Before NNAPI feature level 4, if the elements along an axis are all zeros,
1021      *      the result is undefined. Since NNAPI feature level 4, if the elements along an axis
1022      *      are all zeros, the result is logical zero.
1023      *
1024      * Available since NNAPI feature level 1.
1025      */
1026     ANEURALNETWORKS_L2_NORMALIZATION = 11,
1027 
1028     /**
1029      * Performs a 2-D L2 pooling operation.
1030      *
1031      * The output dimensions are functions of the filter dimensions, stride, and
1032      * padding.
1033      *
1034      * The values in the output tensor are computed as:
1035      *
1036      *     output[b, i, j, c] =
1037      *         sqrt(sum_{di, dj} pow(input[b, strides[1] * i + di, strides[2] * j + dj, c], 2) /
1038      *              sum(1))
1039      *
1040      * Supported tensor {@link OperandCode}:
1041      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1042      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1043      *
1044      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1045      * With the default data layout NHWC, the data is stored in the order of:
1046      * [batch, height, width, channels]. Alternatively, the data layout could
1047      * be NCHW, the data storage order of: [batch, channels, height, width].
1048      * NCHW is supported since NNAPI feature level 3.
1049      *
1050      * Both explicit padding and implicit padding are supported.
1051      *
1052      * Inputs (explicit padding):
1053      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1054      *      the input.
1055      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
1056      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1057      *      the left, in the ‘width’ dimension.
1058      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1059      *      the right, in the ‘width’ dimension.
1060      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1061      *      the top, in the ‘height’ dimension.
1062      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1063      *      the bottom, in the ‘height’ dimension.
1064      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1065      *      walking through input in the ‘width’ dimension.
1066      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1067      *      walking through input in the ‘height’ dimension.
1068      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1069      *      width.
1070      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1071      *      height.
1072      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1073      *      {@link FuseCode} values. Specifies the activation to
1074      *      invoke on the result.
1075      * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1076      *       Set to true to specify NCHW data layout for input0 and output0.
1077      *       Available since NNAPI feature level 3.
1078      *
1079      * Inputs (implicit padding):
1080      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1081      *      the input.
1082      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
1083      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
1084      *      padding scheme, has to be one of the
1085      *      {@link PaddingCode} values.
1086      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1087      *      walking through input in the ‘width’ dimension.
1088      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1089      *      walking through input in the ‘height’ dimension.
1090      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1091      *      width.
1092      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1093      *      height.
1094      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1095      *      {@link FuseCode} values. Specifies the activation to
1096      *      invoke on the result.
1097      * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1098      *      Set to true to specify NCHW data layout for input0 and output0.
1099      *      Available since NNAPI feature level 3.
1100      *
1101      * Outputs:
1102      * * 0: The output 4-D tensor, of shape
1103      *      [batches, out_height, out_width, depth].
1104      *
1105      * Available since NNAPI feature level 1.
1106      */
1107     ANEURALNETWORKS_L2_POOL_2D = 12,
1108 
1109     /**
1110      * Applies Local Response Normalization along the depth dimension.
1111      *
1112      * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the
1113      * last dimension), and each vector is normalized independently. Within a
1114      * given vector, each component is divided by the weighted, squared sum of
1115      * inputs within depth_radius.
1116      *
1117      * The output is calculated using this formula:
1118      *
1119      *     sqr_sum[a, b, c, d] = sum(
1120      *         pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2))
1121      *     output = input / pow((bias + alpha * sqr_sum), beta)
1122      *
1123      * For input tensor with rank less than 4, independently normalizes each
1124      * 1-D slice along specified dimension.
1125      *
1126      * Supported tensor {@link OperandCode}:
1127      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1128      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1129      *
1130      * Supported tensor rank: up to 4
1131      * Tensors with rank less than 4 are only supported since NNAPI feature level 3.
1132      *
1133      * Inputs:
1134      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1135      *      the input.
1136      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the radius of
1137      *      the normalization window.
1138      * * 2: A scalar, specifying the bias, must not be zero.
1139      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias
1140      *      value must be of {@link ANEURALNETWORKS_FLOAT16}.
1141      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias
1142      *      value must be of {@link ANEURALNETWORKS_FLOAT32}.
1143      * * 3: A scalar, specifying the scale factor, alpha.
1144      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the
1145      *      alpha value must be of {@link ANEURALNETWORKS_FLOAT16}.
1146      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the
1147      *      alpha value must be of {@link ANEURALNETWORKS_FLOAT32}.
1148      * * 4: A scalar, specifying the exponent, beta.
1149      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta
1150      *      value must be of {@link ANEURALNETWORKS_FLOAT16}.
1151      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta
1152      *      value must be of {@link ANEURALNETWORKS_FLOAT32}.
1153      * * 5: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1,
1154      *      specifying the dimension normalization would be performed on.
1155      *      Negative index is used to specify axis from the end (e.g. -1 for
1156      *      the last axis). Must be in the range [-n, n).
1157      *      Available since NNAPI feature level 3.
1158      *
1159      * Outputs:
1160      * * 0: The output tensor of same shape as input0.
1161      *
1162      * Available since NNAPI feature level 1.
1163      */
1164     ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION = 13,
1165 
1166     /**
1167      * Computes sigmoid activation on the input tensor element-wise.
1168      *
1169      * The output is calculated using this formula:
1170      *
1171      *     output = 1 / (1 + exp(-input))
1172      *
1173      * Supported tensor {@link OperandCode}:
1174      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1175      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1176      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1177      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1178      *
1179      * Supported tensor rank: up to 4.
1180      *
1181      * Inputs:
1182      * * 0: A tensor, specifying the input.
1183      *      Since NNAPI feature level 3, this tensor may be zero-sized.
1184      *
1185      * Outputs:
1186      * * 0: The output tensor of same shape as input0.
1187      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
1188      *      the scale must be 1.f / 256 and the zeroPoint must be 0.
1189      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
1190      *      the scale must be 1.f / 256 and the zeroPoint must be -128.
1191      *
1192      * Available since NNAPI feature level 1.
1193      */
1194     ANEURALNETWORKS_LOGISTIC = 14,
1195 
1196     /**
1197      * Projects an input to a bit vector via locality sensitive hashing.
1198      *
1199      * Supported input tensor {@link OperandCode}:
1200      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1201      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1202      * * {@link ANEURALNETWORKS_TENSOR_INT32}
1203      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1204      *
1205      * Supported input tensor rank: from 1
1206      *
1207      * Inputs:
1208      * * 0: Hash functions. Dim.size == 2, DataType: Float.
1209      *      Tensor[0].Dim[0]: Number of hash functions.
1210      *      Tensor[0].Dim[1]: Number of projected output bits generated by each
1211      *      hash function.
1212      *      If the projection type is Sparse:
1213      *      Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32
1214      *
1215      * * 1: Input. Dim.size >= 1, no restriction on DataType.
1216      * * 2: Weight. Optional. Dim.size == 1, DataType: Float.
1217      *      If not set, each input element is considered to have the same weight
1218      *      of 1.0.
1219      *      Tensor[1].Dim[0] == Tensor[2].Dim[0]
1220      * * 3: Type:
1221      *        Sparse:
1222      *          Value LSHProjectionType_SPARSE(=3) (since NNAPI feature level 3).
1223      *          Computed bit vector is considered to be sparse.
1224      *          Each output element is an int32 made up of multiple bits
1225      *          computed from hash functions.
1226      *
1227      *          NOTE: To avoid collisions across hash functions, an offset value
1228      *          of k * (1 << Tensor[0].Dim[1]) will be added to each signature,
1229      *          where k is the index of the hash function.
1230      *
1231      *          Value LSHProjectionType_SPARSE_DEPRECATED(=1).
1232      *          Legacy behavior that does not include the offset value.
1233      *
1234      *        Dense:
1235      *          Value LSHProjectionType_DENSE(=2).
1236      *          Computed bit vector is considered to be dense. Each output
1237      *          element represents a bit and can take the value of either
1238      *          0 or 1.
1239      *
1240      * Outputs:
1241      * * 0: If the projection type is Sparse:
1242      *      Output.Dim == { Tensor[0].Dim[0] }
1243      *      A tensor of int32 that represents hash signatures.
1244      *
1245      *      If the projection type is Dense:
1246      *      Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
1247      *      A flattened tensor that represents projected bit vectors.
1248      *
1249      * Available since NNAPI feature level 1.
1250      * The offset value for sparse projections was added in NNAPI feature level 3.
1251      */
1252     ANEURALNETWORKS_LSH_PROJECTION = 15,
1253 
1254     /**
1255      * Performs a single time step in a Long Short-Term Memory (LSTM) layer
1256      *
1257      * The LSTM operation is described by the following equations.
1258      *
1259      * \f{eqnarray*}{
1260      * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\
1261      * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\
1262      * C_t =& clip(f_t \odot C_{t-1} + i_t \odot
1263      *        g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\
1264      * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\
1265      *      & & \\
1266      *      & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj})
1267      *      & if\ there\ is\ a\ projection; \\
1268      * h_t =& & \\
1269      *      & o_t \odot g(C_t) & otherwise. \\
1270      * \f}
1271      * Where:
1272      * * \f$x_t\f$ is the input,
1273      * * \f$i_t\f$ is the input gate,
1274      * * \f$f_t\f$ is the forget gate,
1275      * * \f$C_t\f$ is the cell state,
1276      * * \f$o_t\f$ is the output,
1277      * * \f$h_t\f$ is the output state,
1278      * * \f$\sigma\f$ is the logistic sigmoid function,
1279      * * \f$g\f$ is the cell input and cell output activation function, usually
1280      *   \f$tanh\f$,
1281      * * \f$W_{xi}\f$ is the input-to-input weight matrix,
1282      * * \f$W_{hi}\f$ is the recurrent-to-input weight matrix,
1283      * * \f$W_{ci}\f$ is the cell-to-input weight matrix,
1284      * * \f$b_i\f$ is the input gate bias,
1285      * * \f$W_{xf}\f$ is the input-to-forget weight matrix,
1286      * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix,
1287      * * \f$W_{cf}\f$ is the cell-to-forget weight matrix,
1288      * * \f$b_f\f$ is the forget gate bias,
1289      * * \f$W_{xc}\f$ is the input-to-cell weight matrix,
1290      * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix,
1291      * * \f$b_c\f$ is the cell bias,
1292      * * \f$W_{xo}\f$ is the input-to-output weight matrix,
1293      * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix,
1294      * * \f$W_{co}\f$ is the cell-to-output weight matrix,
1295      * * \f$b_o\f$ is the output gate bias,
1296      * * \f$W_{proj}\f$ is the projection weight matrix,
1297      * * \f$b_{proj}\f$ is the projection bias,
1298      * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and
1299      * * \f$t_{proj}\f$ is the threshold for clipping the projected output.
1300      * * \f$\odot\f$ is the
1301      *   <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)">
1302      *   Hadamard product</a> that takes two matrices and produces another
1303      *   matrix, each element of which is the product of the corresponding
1304      *   elements of the input matrices.
1305      *
1306      * Since NNAPI feature level 3 LSTM supports layer normalization.
1307      * In case layer normalization is used, the inputs to internal activation
1308      * functions (sigmoid and \f$g\f$) are normalized, rescaled and recentered
1309      * following an approach from section 3.1 from
1310      * https://arxiv.org/pdf/1607.06450.pdf
1311      *
1312      * The operation has the following independently optional inputs:
1313      * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights
1314      *   (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all
1315      *   have values or neither of them have values (i.e., all set to null). If
1316      *   they have values, the peephole optimization is used.
1317      * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights
1318      *   (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values,
1319      *   or none of them have values. If they have no values, coupling of input
1320      *   and forget gates (CIFG) is used, in which case the input gate
1321      *   (\f$i_t\f$) is calculated using the following equation instead.
1322      *   \f{eqnarray*}{
1323      *   i_t = 1 - f_t
1324      *   \f}
1325      *   In case peephole optimization is used and CIFG is not used
1326      *   cell-to-input (\f$W_{ci}\f$) weights must be present. Otherwise, the
1327      *   cell-to-input weights must have no value.
1328      * * The projection weights (\f$W_{proj}\f$) is required only for the
1329      *   recurrent projection layer, and should otherwise have no value.
1330      * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a
1331      *   value if the recurrent projection layer exists, and should otherwise
1332      *   have no value.
1333      * * (NNAPI feature level 3 or later) The four layer normalization weights either all have
1334      *   values or none of them have values. Additionally, if CIFG is used,
1335      *   input layer normalization weights tensor is omitted and the other layer
1336      *   normalization weights either all have values or none of them have
1337      *   values. Layer normalization is used when the values of all the layer
1338      *   normalization weights are present.
1339      *
1340      * References:
1341      *
1342      * The default non-peephole non-CIFG implementation is based on:
1343      * http://www.bioinf.jku.at/publications/older/2604.pdf
1344      * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural
1345      * Computation, 9(8):1735-1780, 1997.
1346      *
1347      * The peephole implementation and projection layer is based on:
1348      * https://research.google.com/pubs/archive/43905.pdf
1349      * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory
1350      * recurrent neural network architectures for large scale acoustic
1351      * modeling." INTERSPEECH, 2014.
1352      * (However, the concept of peephole optimization was introduced in work
1353      * prior to this paper.)
1354      *
1355      * The coupling of input and forget gate (CIFG) is based on:
1356      * http://arxiv.org/pdf/1503.04069.pdf
1357      * Greff et al. "LSTM: A Search Space Odyssey"
1358      *
1359      * The layer normalization is based on:
1360      * https://arxiv.org/pdf/1607.06450.pdf
1361      * Jimmy Ba et al. "Layer Normalization"
1362      *
1363      * Supported tensor {@link OperandCode}:
1364      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1365      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1366      *
1367      * All input and output tensors must be of the same type.
1368      *
1369      * Inputs:
1370      * * 0: The input (\f$x_t\f$).
1371      *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
1372      *      corresponds to the batching dimension, and “input_size” is the size
1373      *      of the input.
1374      * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
1375      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
1376      *      corresponds to the number of cell units.
1377      * * 2: The input-to-forget weights (\f$W_{xf}\f$).
1378      *      A 2-D tensor of shape [num_units, input_size].
1379      * * 3: The input-to-cell weights (\f$W_{xc}\f$).
1380      *      A 2-D tensor of shape [num_units, input_size].
1381      * * 4: The input-to-output weights (\f$W_{xo}\f$).
1382      *      A 2-D tensor of shape [num_units, input_size].
1383      * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
1384      *      A 2-D tensor of shape [num_units, output_size], where “output_size”
1385      *      corresponds to either the number of cell units (i.e., “num_units”),
1386      *      or the second dimension of the “projection_weights”, if defined.
1387      * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
1388      *      A 2-D tensor of shape [num_units, output_size].
1389      * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
1390      *      A 2-D tensor of shape [num_units, output_size].
1391      * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
1392      *      A 2-D tensor of shape [num_units, output_size].
1393      * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
1394      *      A 1-D tensor of shape [num_units].
1395      * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
1396      *      A 1-D tensor of shape [num_units].
1397      * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
1398      *      A 1-D tensor of shape [num_units].
1399      * * 12:The input gate bias (\f$b_i\f$). Optional.
1400      *      A 1-D tensor of shape [num_units].
1401      * * 13:The forget gate bias (\f$b_f\f$).
1402      *      A 1-D tensor of shape [num_units].
1403      * * 14:The cell bias (\f$b_c\f$).
1404      *      A 1-D tensor of shape [num_units].
1405      * * 15:The output gate bias (\f$b_o\f$).
1406      *      A 1-D tensor of shape [num_units].
1407      * * 16:The projection weights (\f$W_{proj}\f$). Optional.
1408      *      A 2-D tensor of shape [output_size, num_units].
1409      * * 17:The projection bias (\f$b_{proj}\f$). Optional.
1410      *      A 1-D tensor of shape [output_size].
1411      * * 18:The output state (in) (\f$h_{t-1}\f$).
1412      *      A 2-D tensor of shape [batch_size, output_size].
1413      * * 19:The cell state (in) (\f$C_{t-1}\f$).
1414      *      A 2-D tensor of shape [batch_size, num_units].
1415      * * 20:The activation function (\f$g\f$).
1416      *      A value indicating the activation function:
1417      *      <ul>
1418      *      <li>0: None;
1419      *      <li>1: Relu;
1420      *      <li>3: Relu6;
1421      *      <li>4: Tanh;
1422      *      <li>6: Sigmoid.
1423      *      </ul>
1424      * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
1425      *      that values are bound within [-cell_clip, cell_clip]. If set to 0.0
1426      *      then clipping is disabled.
1427      *      Until NNAPI feature level 3 this scalar must be of type {@link
1428      *      ANEURALNETWORKS_FLOAT32}. Since NNAPI feature level 3, if all the input
1429      *      tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this
1430      *      scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
1431      *      otherwise if all the input tensors have the type {@link
1432      *      ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link
1433      *      ANEURALNETWORKS_FLOAT16}.
1434      * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
1435      *      projection layer, such that values are bound within
1436      *      [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
1437      *      Until NNAPI feature level 3 this scalar must be of type {@link
1438      *      ANEURALNETWORKS_FLOAT32}. Since NNAPI feature level 3, if all the input
1439      *      tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this
1440      *      scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
1441      *      otherwise if all the input tensors have the type {@link
1442      *      ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link
1443      *      ANEURALNETWORKS_FLOAT16}.
1444      * Since NNAPI feature level 3 there are additional inputs to this op:
1445      * * 23:The input layer normalization weights.
1446      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1447      *      to activation at input gate.
1448      * * 24:The forget layer normalization weights.
1449      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1450      *      to activation at forget gate.
1451      * * 25:The cell layer normalization weights.
1452      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1453      *      to activation at cell gate.
1454      * * 26:The output layer normalization weights.
1455      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
1456      *      to activation at output gate.
1457      *
1458      * Outputs:
1459      * * 0: The scratch buffer.
1460      *      A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or
1461      *      [batch_size, num_units * 4] without CIFG.
1462      * * 1: The output state (out) (\f$h_t\f$).
1463      *      A 2-D tensor of shape [batch_size, output_size].
1464      * * 2: The cell state (out) (\f$C_t\f$).
1465      *      A 2-D tensor of shape [batch_size, num_units].
1466      * * 3: The output (\f$o_t\f$).
1467      *      A 2-D tensor of shape [batch_size, output_size]. This is effectively
1468      *      the same as the current “output state (out)” value.
1469      *
1470      * Available since NNAPI feature level 1.
1471      */
1472     ANEURALNETWORKS_LSTM = 16,
1473 
1474     /**
1475      * Performs a 2-D max pooling operation.
1476      *
1477      * The output dimensions are functions of the filter dimensions, stride, and
1478      * padding.
1479      *
1480      * The values in the output tensor are computed as:
1481      *
1482      *     output[b, i, j, channel] =
1483      *         max_{di, dj} (
1484      *             input[b, strides[1] * i + di, strides[2] * j + dj, channel]
1485      *         )
1486      *
1487      * Supported tensor {@link OperandCode}:
1488      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1489      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1490      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1491      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1492      *
1493      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1494      * With the default data layout NHWC, the data is stored in the order of:
1495      * [batch, height, width, channels]. Alternatively, the data layout could
1496      * be NCHW, the data storage order of: [batch, channels, height, width].
1497      * NCHW is supported since NNAPI feature level 3.
1498      *
1499      * Both explicit padding and implicit padding are supported.
1500      *
1501      * Inputs (explicit padding):
1502      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1503      *      the input.
1504      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
1505      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1506      *      the left, in the ‘width’ dimension.
1507      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1508      *      the right, in the ‘width’ dimension.
1509      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1510      *      the top, in the ‘height’ dimension.
1511      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
1512      *      the bottom, in the ‘height’ dimension.
1513      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1514      *      walking through input in the ‘width’ dimension.
1515      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1516      *      walking through input in the ‘height’ dimension.
1517      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1518      *      width.
1519      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1520      *      height.
1521      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1522      *      {@link FuseCode} values. Specifies the activation to
1523      *      invoke on the result.
1524      * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1525      *       Set to true to specify NCHW data layout for input0 and output0.
1526      *       Available since NNAPI feature level 3.
1527      *
1528      * Inputs (implicit padding):
1529      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1530      *      the input.
1531      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
1532      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
1533      *      padding scheme, has to be one of the
1534      *      {@link PaddingCode} values.
1535      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1536      *      walking through input in the ‘width’ dimension.
1537      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
1538      *      walking through input in the ‘height’ dimension.
1539      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1540      *      width.
1541      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter
1542      *      height.
1543      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1544      *      {@link FuseCode} values. Specifies the activation to
1545      *      invoke on the result.
1546      * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1547      *      Set to true to specify NCHW data layout for input0 and output0.
1548      *      Available since NNAPI feature level 3.
1549      *
1550      * Outputs:
1551      * * 0: The output 4-D tensor, of shape
1552      *      [batches, out_height, out_width, depth].
1553      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1554      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1555      *      the scale and zeroPoint must be the same as input0.
1556      *
1557      * Available since NNAPI feature level 1.
1558      */
1559     ANEURALNETWORKS_MAX_POOL_2D = 17,
1560 
1561     /**
1562      * Multiplies two tensors, element-wise.
1563      *
1564      * Takes two input tensors of identical {@link OperandCode} and compatible
1565      * dimensions. The output is the product of both input tensors, optionally
1566      * modified by an activation function.
1567      *
1568      * Two dimensions are compatible when:
1569      *     1. they are equal, or
1570      *     2. one of them is 1
1571      *
1572      * The size of the resulting output is the maximum size along each dimension
1573      * of the input operands. It starts with the trailing dimensions, and works
1574      * its way forward.
1575      *
1576      * Since NNAPI feature level 3, generic zero-sized input tensor is supported. Zero
1577      * dimension is only compatible with 0 or 1. The size of the output
1578      * dimension is zero if either of corresponding input dimension is zero.
1579      *
1580      * Supported tensor {@link OperandCode}:
1581      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1582      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1583      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1584      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1585      * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4)
1586      *
1587      * Supported tensor rank: up to 4
1588      *
1589      * Inputs:
1590      * * 0: A tensor.
1591      * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
1592      *      as input0.
1593      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
1594      *      {@link FuseCode} values. Specifies the activation to
1595      *      invoke on the result.
1596      *      For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,
1597      *      the {@link FuseCode} must be "NONE".
1598      *
1599      * Outputs:
1600      * * 0: The product, a tensor of the same {@link OperandCode} as input0.
1601      *      For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1602      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
1603      *      the following condition must be satisfied:
1604      *      output_scale > input1_scale * input2_scale.
1605      *
1606      * Available since NNAPI feature level 1.
1607      */
1608     ANEURALNETWORKS_MUL = 18,
1609 
1610     /**
1611      * Computes rectified linear activation on the input tensor element-wise.
1612      *
1613      * The output is calculated using this formula:
1614      *
1615      *     output = max(0, input)
1616      *
1617      * Supported tensor {@link OperandCode}:
1618      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1619      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1620      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1621      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1622      *
1623      * Supported tensor rank: up to 4.
1624      *
1625      * Inputs:
1626      * * 0: A tensor, specifying the input.
1627      *      Since NNAPI feature level 3, this tensor may be zero-sized.
1628      *
1629      * Outputs:
1630      * * 0: The output tensor of same shape as input0.
1631      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1632      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1633      *      the scale and zeroPoint must be the same as input0.
1634      *
1635      * Available since NNAPI feature level 1.
1636      */
1637     ANEURALNETWORKS_RELU = 19,
1638 
1639     /**
1640      * Computes rectified linear 1 activation on the input tensor element-wise.
1641      *
1642      * The output is calculated using this formula:
1643      *
1644      *     output = min(1.f, max(-1.f, input))
1645      *
1646      * Supported tensor {@link OperandCode}:
1647      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1648      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1649      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1650      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1651      *
1652      * Supported tensor rank: up to 4.
1653      *
1654      * Inputs:
1655      * * 0: A tensor, specifying the input.
1656      *      Since NNAPI feature level 3, this tensor may be zero-sized.
1657      *
1658      * Outputs:
1659      * * 0: The output tensor of the same shape as input0.
1660      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1661      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1662      *      the scale and zeroPoint must be the same as input0.
1663      *
1664      * Available since NNAPI feature level 1.
1665      */
1666     ANEURALNETWORKS_RELU1 = 20,
1667 
1668     /**
1669      * Computes rectified linear 6 activation on the input tensor element-wise.
1670      *
1671      * The output is calculated using this formula:
1672      *
1673      *     output = min(6, max(0, input))
1674      *
1675      * Supported tensor {@link OperandCode}:
1676      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1677      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1678      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1679      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1680      *
1681      * Supported tensor rank: up to 4.
1682      *
1683      * Inputs:
1684      * * 0: A tensor, specifying the input.
1685      *      Since NNAPI feature level 3, this tensor may be zero-sized.
1686      *
1687      * Outputs:
1688      * * 0: The output tensor of same shape as input0.
1689      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1690      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1691      *      the scale and zeroPoint must be the same as input0.
1692      *
1693      * Available since NNAPI feature level 1.
1694      */
1695     ANEURALNETWORKS_RELU6 = 21,
1696 
1697     /**
1698      * Reshapes a tensor.
1699      *
1700      * Given tensor, this operation returns a tensor that has the same values as
1701      * tensor, but with a newly specified shape.
1702      *
1703      * Supported tensor {@link OperandCode}:
1704      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1705      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1706      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1707      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1708      *
1709      * Supported tensor rank: up to 4.
1710      *
1711      * Inputs:
1712      * * 0: A tensor, specifying the tensor to be reshaped.
1713      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, defining the
1714      *      shape of the output tensor. The number of elements implied by shape
1715      *      must be the same as the number of elements in the input tensor.
1716      *
1717      *      If one component of shape is the special value -1, the size of that
1718      *      dimension is computed so that the total size remains constant. In
1719      *      particular, a shape of [-1] flattens into 1-D. At most one component
1720      *      of shape can be -1.
1721      *
1722      * Outputs:
1723      * * 0: The output tensor, of shape specified by the input shape.
1724      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1725      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1726      *      the scale and zeroPoint must be the same as input0.
1727      *
1728      * Available since NNAPI feature level 1.
1729      */
1730     ANEURALNETWORKS_RESHAPE = 22,
1731 
1732     /**
1733      * Resizes images to a given size using bilinear interpolation.
1734      *
1735      * Resized images will be distorted if their output aspect ratio is not the
1736      * same as the input aspect ratio. The corner pixels of the output may not
1737      * be the same as the corner pixels of the input.
1738      *
1739      * Supported tensor {@link OperandCode}:
1740      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1741      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1742      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3)
1743      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1744      *
1745      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1746      * With the default data layout NHWC, the data is stored in the order of:
1747      * [batch, height, width, channels]. Alternatively, the data layout could
1748      * be NCHW, the data storage order of: [batch, channels, height, width].
1749      * NCHW is supported since NNAPI feature level 3.
1750      *
1751      * Both resizing by shape and resizing by scale are supported.
1752      *
1753      * Inputs (resizing by shape):
1754      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1755      *      the input.
1756      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
1757      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
1758      *      width of the output tensor.
1759      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
1760      *      height of the output tensor.
1761      * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1762      *      Set to true to specify NCHW data layout for input0 and output0.
1763      *      Available since NNAPI feature level 3.
1764      * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}
1765      *      scalar, default to false.  If True, the centers of the 4 corner
1766      *      pixels of the input and output tensors are aligned, preserving the
1767      *      values at the corner pixels.
1768      *      Available since NNAPI feature level 4.
1769      * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}
1770      *      scalar, default to false. If True, the pixel centers are assumed to
1771      *      be at (0.5, 0.5). This is the default behavior of image.resize in
1772      *      TF 2.0. If this parameter is True, then align_corners parameter
1773      *      must be False.
1774      *      Available since NNAPI feature level 4.
1775      *
1776      * Inputs (resizing by scale, since NNAPI feature level 3):
1777      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
1778      *      the input. Zero batches is supported for this tensor.
1779      * * 1: A scalar, specifying width_scale, the scaling factor of the width
1780      *      dimension from the input tensor to the output tensor. The output
1781      *      width is calculated as new_width = floor(width * width_scale).
1782      *      The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
1783      *      of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
1784      *      {@link ANEURALNETWORKS_FLOAT32} otherwise.
1785      * * 2: A scalar, specifying height_scale, the scaling factor of the height
1786      *      dimension from the input tensor to the output tensor. The output
1787      *      height is calculated as new_height = floor(height * height_scale).
1788      *      The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
1789      *      of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
1790      *      {@link ANEURALNETWORKS_FLOAT32} otherwise.
1791      * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1792      *      Set to true to specify NCHW data layout for input0 and output0.
1793      * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}
1794      *      scalar, default to false.  If True, the centers of the 4 corner
1795      *      pixels of the input and output tensors are aligned, preserving the
1796      *      values at the corner pixels.
1797      *      Available since NNAPI feature level 4.
1798      * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}
1799      *      scalar, default to false. If True, the pixel centers are assumed to
1800      *      be at (0.5, 0.5). This is the default behavior of image.resize in
1801      *      TF 2.0. If this parameter is True, then align_corners parameter
1802      *      must be False.
1803      *      Available since NNAPI feature level 4.
1804      *
1805      * Outputs:
1806      * * 0: The output 4-D tensor, of shape
1807      *      [batches, new_height, new_width, depth].
1808      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1809      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1810      *      the scale and zeroPoint must be the same as input0.
1813      *
1814      * Available since NNAPI feature level 1.
1815      */
1816     ANEURALNETWORKS_RESIZE_BILINEAR = 23,
1817 
1818     /**
1819      * A basic recurrent neural network layer.
1820      *
1821      * This layer implements the operation:
1822      * outputs = state = activation(inputs * input_weights +
1823      *                              state * recurrent_weights + bias)
1824      *
1825      * Where:
1826      * * “input_weights” is a weight matrix that multiplies the inputs;
1827      * * “recurrent_weights” is a weight matrix that multiplies the current
1828      *    “state” which itself is the output from the previous time step
1829      *    computation;
1830      * * “bias” is a bias vector (added to each output vector in the batch);
1831      * * “activation” is the function passed as the “fused_activation_function”
1832      *   argument (if not “NONE”).
1833      *
1834      * Supported tensor {@link OperandCode}:
1835      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1836      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1837      *
1838      * The input tensors must all be the same type.
1839      *
1840      * Inputs:
1841      * * 0: input.
1842      *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
1843      *      corresponds to the batching dimension, and “input_size” is the size
1844      *      of the input.
1845      * * 1: weights.
1846      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
1847      *      corresponds to the number of units.
1848      * * 2: recurrent_weights.
1849      *      A 2-D tensor of shape [num_units, num_units], with columns
1850      *      corresponding to the weights from each unit.
1851      * * 3: bias.
1852      *      A 1-D tensor of shape [num_units].
1853      * * 4: hidden state (in).
1854      *      A 2-D tensor of shape [batch_size, num_units].
1855      * * 5: fused_activation_function.
1856      *      An optional {@link FuseCode} value indicating the
1857      *      activation function. If “NONE” is specified then it results in a
1858      *      linear activation.
1859      *
1860      * Outputs:
1861      * * 0: hidden state (out).
1862      *      A 2-D tensor of shape [batch_size, num_units].
1863      *
1864      * * 1: output.
1865      *      A 2-D tensor of shape [batch_size, num_units]. This is effectively
1866      *      the same as the current state value.
1867      *
1868      * Available since NNAPI feature level 1.
1869      */
1870     ANEURALNETWORKS_RNN = 24,
1871 
1872     /**
1873      * Computes the softmax activation on the input tensor element-wise, per
1874      * batch, by normalizing the input vector so the maximum coefficient is
1875      * zero.
1876      *
1877      * The output is calculated using this formula:
1878      *
1879      *     output[batch, i] =
1880      *         exp((input[batch, i] - max(input[batch, :])) * beta) /
1881      *         sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
1882      *
1883      * For input tensor with rank other than 2, the activation will be applied
1884      * independently on each 1-D slice along specified dimension.
1885      *
1886      * Supported tensor {@link OperandCode}:
1887      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1888      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1889      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1890      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1891      *
1892      * Supported tensor rank: up to 4.
1893      * Tensors with rank other than 2 or 4 are only supported since NNAPI feature level 3.
1894      *
1895      * Inputs:
1896      * * 0: A 2-D or 4-D tensor, specifying the input.
1897      *      Since NNAPI feature level 3, this tensor may be zero-sized.
1898      * * 1: A scalar, specifying the positive scaling factor for the exponent,
1899      *      beta. If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT32},
1900      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
1901      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, the scalar
1902      *      must be of {@link ANEURALNETWORKS_FLOAT32}.
1903      *      If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, then the
1904      *      scalar must be of {@link ANEURALNETWORKS_FLOAT16}.
1905      * * 2: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1,
1906      *      specifying the dimension the activation would be performed on.
1907      *      Negative index is used to specify axis from the end (e.g. -1 for
1908      *      the last axis). Must be in the range [-n, n).
1909      *      Available since NNAPI feature level 3.
1910      *
1911      * Outputs:
1912      * * 0: The output tensor of same shape as input0.
1913      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
1914      *      the scale must be 1.f / 256 and the zeroPoint must be 0.
1915      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
1916      *      the scale must be 1.f / 256 and the zeroPoint must be -128.
1917      *
1918      * Available since NNAPI feature level 1.
1919      */
1920     ANEURALNETWORKS_SOFTMAX = 25,
1921 
1922     /**
1923      * Rearranges blocks of spatial data, into depth.
1924      *
1925      * More specifically, this op outputs a copy of the input tensor where
1926      * values from the height and width dimensions are moved to the depth
1927      * dimension. The value block_size indicates the input block size and how
1928      * the data is moved.
1929      *
1930      * Chunks of data of size block_size * block_size are rearranged into
1931      * non-overlapping blocks of size block_size x block_size.
1932      *
1933      * The depth of the output tensor is input_depth * block_size * block_size.
1934      * The input tensor's height and width must be divisible by block_size.
1935      *
1936      * Supported tensor {@link OperandCode}:
1937      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
1938      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
1939      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
1940      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
1941      *
1942      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
1943      * With the default data layout NHWC, the data is stored in the order of:
1944      * [batch, height, width, channels]. Alternatively, the data layout could
1945      * be NCHW, the data storage order of: [batch, channels, height, width].
1946      * NCHW is supported since NNAPI feature level 3.
1947      *
1948      * Inputs:
1949      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
1950      *      specifying the input.
1951      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size.
1952      *      block_size must be >=1 and block_size must be a divisor of both the
1953      *      input height and width.
1954      * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
1955      *      Set to true to specify NCHW data layout for input0 and output0.
1956      *      Available since NNAPI feature level 3.
1957      *
1958      * Outputs:
1959      * * 0: The output 4-D tensor, of shape [batches, height/block_size,
1960      *      width/block_size, depth_in*block_size*block_size].
1961      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
1962      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
1963      *      the scale and zeroPoint must be the same as input0.
1964      *
1965      * Available since NNAPI feature level 1.
1966      */
1967     ANEURALNETWORKS_SPACE_TO_DEPTH = 26,
1968 
1969     /**
1970      * SVDF op is a kind of stateful layer derived from the notion that a
1971      * densely connected layer that's processing a sequence of input frames can
1972      * be approximated by using a singular value decomposition of each of its
1973      * nodes. The implementation is based on:
1974      *
1975      * https://research.google.com/pubs/archive/43813.pdf
1976      *
1977      * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada.
1978      * “Compressing Deep Neural Networks using a Rank-Constrained Topology”.
1979      * INTERSPEECH, 2015.
1980      *
1981      * It processes the incoming input using a 2-stage filtering mechanism:
1982      * * stage 1 performs filtering on the "features" dimension, whose outputs
1983      *   get pushed into a memory of fixed-size memory_size.
1984      * * stage 2 performs filtering on the "time" dimension of the memory_size
1985      *   memoized outputs of stage 1.
1986      *
1987      * Specifically, for rank 1, this layer implements the operation:
1988      *
1989      *     memory = push(conv1d(inputs, weights_feature, feature_dim,
1990      *                          "ANEURALNETWORKS_PADDING_VALID"));
1991      *     outputs = activation(memory * weights_time + bias);
1992      *
1993      * Where:
1994      * * “weights_feature” is a weights matrix that processes the inputs (by
1995      *   convolving the input with every “feature filter”), and whose outputs
1996      *   get pushed, stacked in order, into the fixed-size “memory” (the oldest
1997      *   entry gets dropped);
1998      * * “weights_time” is a weights matrix that processes the “memory” (by a
1999      *   batched matrix multiplication on the num_units);
2000      * * “bias” is an optional bias vector (added to each output vector in the
2001      *   batch); and
2002      * * “activation” is the function passed as the “fused_activation_function”
2003      *   argument (if not “NONE”).
2004      *
2005      * Each rank adds a dimension to the weights matrices by means of stacking
2006      * the filters.
2007      *
2008      * Supported tensor {@link OperandCode}:
2009      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2010      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2011      *
2012      * All input tensors must be the same type.
2013      *
2014      * Inputs:
2015      * * 0: input.
2016      *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
2017      *      corresponds to the batching dimension, and “input_size” is the size
2018      *      of the input.
2019      * * 1: weights_feature.
2020      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
2021      *      corresponds to the number of units.
2022      * * 2: weights_time.
2023      *      A 2-D tensor of shape [num_units, memory_size], where “memory_size”
2024      *      corresponds to the fixed-size of the memory.
2025      * * 3: bias.
2026      *      An optional 1-D tensor of shape [num_units].
2027      * * 4: state (in).
2028      *      A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank].
2029      * * 5: rank.
2030      *      The rank of the SVD approximation.
2031      * * 6: fused_activation_function.
2032      *      An optional {@link FuseCode} value indicating the
2033      *      activation function. If “NONE” is specified then it results in a
2034      *      linear activation.
2035      *
2036      * Outputs:
2037      * * 0: state (out).
2038      *      A 2-D tensor of the same {@link OperandCode} as the inputs, with shape
2039      *      [batch_size, (memory_size - 1) * num_units * rank].
2040      * * 1: output.
2041      *      A 2-D tensor of the same {@link OperandCode} as the inputs, with shape
2042      *      [batch_size, num_units].
2043      *
2044      * Available since NNAPI feature level 1.
2045      */
2046     ANEURALNETWORKS_SVDF = 27,
2047 
2048     /**
2049      * Computes hyperbolic tangent of input tensor element-wise.
2050      *
2051      * The output is calculated using this formula:
2052      *
2053      *     output = tanh(input)
2054      *
2055      * Supported tensor {@link OperandCode}:
2056      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2057      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2058      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3)
2059      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2060      *
2061      * Supported tensor rank: up to 4.
2062      *
2063      * Inputs:
2064      * * 0: A tensor, specifying the input.
2065      *      Since NNAPI feature level 3, this tensor may be zero-sized.
2066      *
2067      * Outputs:
2068      * * 0: The output tensor of same shape as input0.
2069      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
2070      *      the scale must be 1.f / 128 and the zeroPoint must be 128.
2071      *      For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
2072      *      the scale must be 1.f / 128 and the zeroPoint must be 0.
2073      *
2074      * Available since NNAPI feature level 1.
2075      */
2076     ANEURALNETWORKS_TANH = 28,
2077 
2078     // Operations below are available since NNAPI feature level 2.
2079 
2080     /**
2081      * BatchToSpace for N-dimensional tensors.
2082      *
2083      * This operation reshapes the batch dimension (dimension 0) into M + 1
2084      * dimensions of shape block_shape + [batch], interleaves these blocks back
2085      * into the grid defined by the spatial dimensions [1, ..., M], to obtain a
2086      * result with the same rank as the input.
2087      *
2088      * This is the reverse of SpaceToBatch.
2089      *
2090      * Supported tensor {@link OperandCode}:
2091      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2092      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2093      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2094      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2095      *
2096      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
2097      * With the default data layout NHWC, the data is stored in the order of:
2098      * [batch, height, width, channels]. Alternatively, the data layout could
2099      * be NCHW, the data storage order of: [batch, channels, height, width].
2100      * NCHW is supported since NNAPI feature level 3.
2101      *
2102      * Inputs:
2103      * * 0: An n-D tensor, specifying the tensor to be reshaped
2104      * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block
2105      *      sizes for each spatial dimension of the input tensor. All values
2106      *      must be >= 1.
2107      * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
2108      *      Set to true to specify NCHW data layout for input0 and output0.
2109      *      Available since NNAPI feature level 3.
2110      *
2111      * Outputs:
2112      * * 0: A tensor of the same {@link OperandCode} as input0.
2113      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2114      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2115      *      the scale and zeroPoint must be the same as input0.
2116      *
2117      * Available since NNAPI feature level 2.
2118      */
2119     ANEURALNETWORKS_BATCH_TO_SPACE_ND = 29,
2120 
2121     /**
2122      * Element-wise division of two tensors.
2123      *
2124      * Takes two input tensors of identical {@link OperandCode} and compatible
2125      * dimensions. The output is the result of dividing the first input tensor
2126      * by the second, optionally modified by an activation function.
2127      *
2128      * For inputs of {@link ANEURALNETWORKS_TENSOR_INT32}, performs
2129      * "floor division" ("//" in Python). For example,
2130      *     5 // 2 = 2
2131      *    -5 // 2 = -3
2132      *
2133      * Two dimensions are compatible when:
2134      *     1. they are equal, or
2135      *     2. one of them is 1
2136      *
2137      * The size of the output is the maximum size along each dimension of the
2138      * input operands. It starts with the trailing dimensions, and works its way
2139      * forward.
2140      *
2141      * Example:
2142      *     input1.dimension =    {4, 1, 2}
2143      *     input2.dimension = {5, 4, 3, 1}
2144      *     output.dimension = {5, 4, 3, 2}
2145      *
2146      * Since NNAPI feature level 3, generic zero-sized input tensor is supported. Zero
2147      * dimension is only compatible with 0 or 1. The size of the output
2148      * dimension is zero if either of corresponding input dimension is zero.
2149      *
2150      * Supported tensor {@link OperandCode}:
2151      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2152      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2153      * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4)
2154      *
2155      * Supported tensor rank: up to 4
2156      *
2157      * Inputs:
2158      * * 0: An n-D tensor, specifying the first input.
2159      * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
2160      *      as input0.
2161      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
2162      *      {@link FuseCode} values. Specifies the activation to
2163      *      invoke on the result.
2164      *      For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,
2165      *      the {@link FuseCode} must be "NONE".
2166      *
2167      * Outputs:
2168      * * 0: A tensor of the same {@link OperandCode} as input0.
2169      *
2170      * Available since NNAPI feature level 2.
2171      */
2172     ANEURALNETWORKS_DIV = 30,
2173 
2174     /**
2175      * Computes the mean of elements across dimensions of a tensor.
2176      *
2177      * Reduces the input tensor along the given dimensions to reduce. Unless
2178      * keep_dims is true, the rank of the tensor is reduced by 1 for each entry
2179      * in axis. If keep_dims is true, the reduced dimensions are retained with
2180      * length 1.
2181      *
2182      * Supported tensor {@link OperandCode}:
2183      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2184      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2185      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2186      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2187      *
2188      * Supported tensor rank: up to 4
2189      *
2190      * Inputs:
2191      * * 0: A tensor, specifying the input.
2192      * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
2193      *      to reduce. Must be in the range
2194      *      [-rank(input_tensor), rank(input_tensor)).
2195      *
2196      *      NOTE: When the operation was introduced, the documentation
2197      *      incorrectly stated that if dimensions were empty, the operation
2198      *      would reduce across all dimensions. This behavior was never
2199      *      implemented.
2200      *
2201      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, keep_dims. If positive,
2202      *      retains reduced dimensions with length 1.
2203      *
2204      * Outputs:
2205      * * 0: A tensor of the same {@link OperandCode} as input0.
2206      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2207      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2208      *      the scale and zeroPoint must be the same as input0.
2209      *      If all dimensions are reduced and keep_dims is false, the output
2210      *      shape is [1].
2211      *
2212      * Available since NNAPI feature level 2.
2213      */
2214     ANEURALNETWORKS_MEAN = 31,
2215 
2216     /**
2217      * Pads a tensor.
2218      *
2219      * This operation pads a tensor according to the specified paddings.
2220      *
2221      * Supported tensor {@link OperandCode}:
2222      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2223      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2224      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2225      *   (full support since NNAPI feature level 3, see the output section)
2226      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2227      *
2228      * Supported tensor rank: up to 4
2229      *
2230      * Inputs:
2231      * * 0: An n-D tensor, specifying the tensor to be padded.
2232      * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
2233      *      for each spatial dimension of the input tensor. The shape of the
2234      *      tensor must be {rank(input0), 2}.
2235      *      padding[i, 0] specifies the number of elements to be padded in the
2236      *      front of dimension i.
2237      *      padding[i, 1] specifies the number of elements to be padded after the
2238      *      end of dimension i.
2239      *
2240      * Outputs:
2241      * * 0: A tensor of the same {@link OperandCode} as input0. The
2242      *      output tensor has the same rank as input0, and each
2243      *      dimension of the output tensor has the same size as the
2244      *      corresponding dimension of the input tensor plus the size
2245      *      of the padding:
2246      *          output0.dimension[i] =
2247      *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
2248      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2249      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2250      *      the scale and zeroPoint must be the same as input0.
2251      *
2252      *      NOTE: Before NNAPI feature level 3, the pad value for
2253      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined.
2254      *      Since NNAPI feature level 3, the pad value is always the logical zero.
2255      *
2256      * Available since NNAPI feature level 2.
2257      */
2258     ANEURALNETWORKS_PAD = 32,
2259 
2260     /**
2261      * SpaceToBatch for N-Dimensional tensors.
2262      *
2263      * This operation divides "spatial" dimensions [1, ..., M] of the input into
2264      * a grid of blocks of shape block_shape, and interleaves these blocks with
2265      * the "batch" dimension (0) such that in the output, the spatial dimensions
2266      * [1, ..., M] correspond to the position within the grid, and the batch
2267      * dimension combines both the position within a spatial block and the
2268      * original batch position. Prior to division into blocks, the spatial
2269      * dimensions of the input are optionally zero padded according to paddings.
2270      *
2271      * Supported tensor {@link OperandCode}:
2272      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2273      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2274      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2275      *   (full support since NNAPI feature level 3, see the output section)
2276      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2277      *
2278      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
2279      * With the default data layout NHWC, the data is stored in the order of:
2280      * [batch, height, width, channels]. Alternatively, the data layout could
2281      * be NCHW, the data storage order of: [batch, channels, height, width].
2282      * NCHW is supported since NNAPI feature level 3.
2283      *
2284      * Inputs:
2285      * * 0: An n-D tensor, specifying the input.
2286      * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block
2287      *      sizes for each spatial dimension of the input tensor. All values
2288      *      must be >= 1.
2289      * * 2: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
2290      *      for each spatial dimension of the input tensor. All values must be
2291      *      >= 0. The shape of the tensor must be {M, 2}, where M is the number
2292      *      of spatial dimensions.
2293      *      padding[i, 0] specifies the number of element to be padded in the
2294      *      front of dimension i.
2295      *      padding[i, 1] specifies the number of element to be padded after the
2296      *      end of dimension i.
2297      * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
2298      *      Set to true to specify NCHW data layout for input0 and output0.
2299      *      Available since NNAPI feature level 3.
2300      *
2301      * Outputs:
2302      * * 0: A tensor of the same {@link OperandCode} as input0.
2303      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2304      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2305      *      the scale and zeroPoint must be the same as input0.
2306      *
2307      *      NOTE: Before NNAPI feature level 3, the pad value for
2308      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined.
2309      *      Since NNAPI feature level 3, the pad value is always the logical zero.
2310      *
2311      * Available since NNAPI feature level 2.
2312      */
2313     ANEURALNETWORKS_SPACE_TO_BATCH_ND = 33,
2314 
2315     /**
2316      * Removes dimensions of size 1 from the shape of a tensor.
2317      *
2318      * Given a tensor input, this operation returns a tensor of the same
2319      * {@link OperandCode} with all dimensions of size 1 removed. If you don't
2320      * want to remove all size 1 dimensions, you can remove specific size 1
2321      * dimensions by specifying the axes (input1).
2322      *
2323      * Supported tensor {@link OperandCode}:
2324      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2325      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2326      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2327      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2328      *
2329      * Supported tensor rank: up to 4
2330      *
2331      * Inputs:
2332      * * 0: An n-D tensor, the tensor to be squeezed.
2333      * * 1: An optional 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
2334      *      dimensions to squeeze. If specified only squeezes the dimensions
2335      *      listed. Otherwise, squeezes all dimensions. The dimension index
2336      *      starts at 0. An error must be reported if squeezing a dimension that
2337      *      is not 1.
2338      *
2339      * Outputs:
2340      * * 0: A tensor of the same {@link OperandCode} as input0. Contains the
2341      *      same data as input, but has one or more dimensions of size 1
2342      *      removed.
2343      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2344      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2345      *      the scale and zeroPoint must be the same as input0.
2346      *      If all input dimensions are equal to 1 and are to be squeezed, the
2347      *      output shape is [1].
2348      *
2349      * Available since NNAPI feature level 2.
2350      */
2351     ANEURALNETWORKS_SQUEEZE = 34,
2352 
2353     /**
2354      * Extracts a strided slice of a tensor.
2355      *
2356      * Roughly speaking, this op extracts a slice of size (end - begin) / stride
2357      * from the given input tensor. Starting at the location specified by begin
2358      * the slice continues by adding stride to the index until all dimensions
2359      * are not less than end. Note that a stride can be negative, which causes a
2360      * reverse slice.
2361      *
2362      * Supported tensor {@link OperandCode}:
2363      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2364      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2365      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2366      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2367      *
2368      * Supported tensor rank: up to 4
2369      *
2370      * Inputs:
2371      * * 0: An n-D tensor, specifying the tensor to be sliced.
2372      * * 1: begin, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
2373      *      starts of the dimensions of the input tensor to be sliced. The
2374      *      length must be of rank(input0).
2375      * * 2: end, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
2376      *      ends of the dimensions of the input tensor to be sliced. The length
2377      *      must be of rank(input0).
2378      * * 3: strides, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
2379      *      strides of the dimensions of the input tensor to be sliced. The
2380      *      length must be of rank(input0). The entries must be non-zero.
2381      * * 4: begin_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit
2382      *      of begin_mask is set, begin[i] is ignored and the fullest possible
2383      *      range in that dimension is used instead.
2384      * * 5: end_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit of
2385      *      end_mask is set, end[i] is ignored and the fullest possible range in
2386      *      that dimension is used instead.
2387      * * 6: shrink_axis_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the
2388      *      ith bit of shrink_axis_mask is set, the ith dimension specification
2389      *      shrinks the dimensionality by 1, taking on the value at index
2390      *      begin[i]. In this case, the ith specification must define a
2391      *      slice of size 1, e.g. begin[i] = x, end[i] = x + 1.
2392      *
2393      * Outputs:
2394      * * 0: A tensor of the same {@link OperandCode} as input0 and rank (n - k),
2395      *      where k is the number of bits set in shrink_axis_mask.
2396      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2397      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2398      *      the scale and zeroPoint must be the same as input0.
2399      *      If shrink_axis_mask is true for all input dimensions, the output
2400      *      shape is [1].
2401      *
2402      * Available since NNAPI feature level 2.
2403      */
2404     ANEURALNETWORKS_STRIDED_SLICE = 35,
2405 
2406     /**
2407      * Element-wise subtraction of two tensors.
2408      *
2409      * Takes two input tensors of identical {@link OperandCode} and compatible
2410      * dimensions. The output is the result of subtracting the second input
2411      * tensor from the first one, optionally modified by an activation function.
2412      *
2413      * Two dimensions are compatible when:
2414      *     1. they are equal, or
2415      *     2. one of them is 1
2416      *
2417      * The size of the output is the maximum size along each dimension of the
2418      * input operands. It starts with the trailing dimensions, and works its way
2419      * forward.
2420      *
2421      * Example:
2422      *     input1.dimension =    {4, 1, 2}
2423      *     input2.dimension = {5, 4, 3, 1}
2424      *     output.dimension = {5, 4, 3, 2}
2425      *
2426      * Since NNAPI feature level 3, generic zero-sized input tensor is supported. Zero
2427      * dimension is only compatible with 0 or 1. The size of the output
2428      * dimension is zero if either of corresponding input dimension is zero.
2429      *
2430      * Supported tensor {@link OperandCode}:
2431      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2432      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2433      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since NNAPI feature level 3)
2434      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2435      * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4)
2436      *
2437      * Supported tensor rank: up to 4
2438      *
2439      * Inputs:
2440      * * 0: An n-D tensor, specifying the first input.
2441      * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
2442      *      as input0.
2443      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
2444      *      {@link FuseCode} values. Specifies the activation to
2445      *      invoke on the result.
2446      *      For a {@link ANEURALNETWORKS_TENSOR_INT32} tensor,
2447      *      the {@link FuseCode} must be "NONE".
2448      *
2449      * Outputs:
2450      * * 0: A tensor of the same {@link OperandCode} as input0.
2451      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2452      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2453      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
2454      *
2455      * Available since NNAPI feature level 2.
2456      */
2457     ANEURALNETWORKS_SUB = 36,
2458 
2459     /**
2460      * Transposes the input tensor, permuting the dimensions according to the
2461      * perm tensor.
2462      *
2463      * The returned tensor's dimension i corresponds to the input dimension
2464      * perm[i]. If perm is not given, it is set to (n-1...0), where n is the
2465      * rank of the input tensor. Hence by default, this operation performs a
2466      * regular matrix transpose on 2-D input Tensors.
2467      *
2468      * Supported tensor {@link OperandCode}:
2469      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since NNAPI feature level 3)
2470      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2471      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2472      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2473      *
2474      * Supported tensor rank: up to 4
2475      *
2476      * Inputs:
2477      * * 0: An n-D tensor, specifying the tensor to be transposed.
2478      *      Since NNAPI feature level 3, this tensor may be zero-sized.
2479      * * 1: An optional 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32},
2480      *      the permutation of the dimensions of the input tensor.
2481      *
2482      * Outputs:
2483      * * 0: A tensor of the same {@link OperandCode} as input0.
2484      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
2485      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
2486      *      the scale and zeroPoint must be the same as input0.
2487      *
2488      * Available since NNAPI feature level 2.
2489      */
2490     ANEURALNETWORKS_TRANSPOSE = 37,
2491 
2492     // Operations below are available since NNAPI feature level 3.
2493 
2494     /**
2495      * Computes the absolute value of a tensor, element-wise.
2496      *
2497      * Supported tensor {@link OperandCode}:
2498      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2499      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2500      * * {@link ANEURALNETWORKS_TENSOR_INT32} (since NNAPI feature level 4)
2501      *
2502      * Supported tensor rank: from 1.
2503      *
2504      * Inputs:
2505      * * 0: A tensor.
2506      *
2507      * Outputs:
2508      * * 0: The output tensor of same shape as input0.
2509      *
2510      * Available since NNAPI feature level 3.
2511      */
2512     ANEURALNETWORKS_ABS = 38,
2513 
2514     /**
2515      * Returns the index of the largest element along an axis.
2516      *
2517      * Supported tensor {@link OperandCode}:
2518      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2519      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2520      * * {@link ANEURALNETWORKS_TENSOR_INT32}
2521      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2522      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2523      *
2524      * Supported tensor rank: from 1
2525      *
2526      * Inputs:
2527      * * 0: An n-D tensor specifying the input. Must be non-empty.
2528      * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to
2529      *      reduce across. Negative index is used to specify axis from the
2530      *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
2531      *
2532      * Outputs:
2533      * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor.
2534      *      If input is 1-dimensional, the output shape is [1].
2535      *
2536      * Available since NNAPI feature level 3.
2537      */
2538     // There is no underscore in ARG_MAX to avoid name conflict with
2539     // the macro defined in libc/kernel/uapi/linux/limits.h.
2540     ANEURALNETWORKS_ARGMAX = 39,
2541 
2542     /**
2543      * Returns the index of the smallest element along an axis.
2544      *
2545      * Supported tensor {@link OperandCode}:
2546      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2547      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2548      * * {@link ANEURALNETWORKS_TENSOR_INT32}
2549      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
2550      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
2551      *
2552      * Supported tensor rank: from 1
2553      *
2554      * Inputs:
2555      * * 0: An n-D tensor specifying the input. Must be non-empty.
2556      * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to
2557      *      reduce across. Negative index is used to specify axis from the
2558      *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
2559      *
2560      * Outputs:
2561      * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor.
2562      *      If input is 1-dimensional, the output shape is [1].
2563      *
2564      * Available since NNAPI feature level 3.
2565      */
2566     ANEURALNETWORKS_ARGMIN = 40,  // See ARGMAX for naming discussion.
2567 
2568     /**
2569      * Transform axis-aligned bounding box proposals using bounding box deltas.
2570      *
2571      * Given the positions of bounding box proposals and the corresponding
2572      * bounding box deltas for each class, return the refined bounding box
2573      * regions. The resulting bounding boxes are clipped against the edges of
2574      * the image.
2575      *
2576      * Supported tensor {@link OperandCode}:
2577      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2578      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2579      * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}
2580      *
2581      * Inputs:
2582      * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the
2583      *      bounding box proposals, each line with format [x1, y1, x2, y2].
2584      *      For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
2585      *      the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois
2586      *      is supported for this tensor.
2587      * * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the
2588      *      bounding box delta for each region of interest and each class. The
2589      *      bounding box deltas are organized in the following order
2590      *      [dx, dy, dw, dh], where dx and dy is the relative correction factor
2591      *      for the center position of the bounding box with respect to the width
2592      *      and height, dw and dh is the log-scale relative correction factor
2593      *      for the width and height. For input0 of type
2594      *      {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, this tensor should be
2595      *      of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
2596      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}. Zero num_rois is
2597      *      supported for this tensor.
2598      * * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
2599      *      [num_rois], specifying the batch index of each box. Boxes with
2600      *      the same batch index are grouped together. Zero num_rois is
2601      *      supported for this tensor.
2602      * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of
2603      *      each image in the batch, each line with format
2604      *      [image_height, image_width].
2605      *
2606      * Outputs:
2607      * * 0: A tensor of the same {@link OperandCode} as input0, with shape
2608      *      [num_rois, num_classes * 4], specifying the coordinates of each
2609      *      output bounding box for each class, with format [x1, y1, x2, y2].
2610      *      For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the
2611      *      scale must be 0.125 and the zero point must be 0.
2612      *
2613      * Available since NNAPI feature level 3.
2614      */
2615     ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM = 41,
2616 
2617     /**
2618      * A recurrent neural network layer that applies an LSTM cell to a
2619      * sequence of inputs in forward and backward directions.
2620      *
2621      * The op supports cross-linking via an auxiliary input. Regular cell feeds
2622      * one input into the two RNN cells in the following way:
2623      *
2624      *       INPUT  (INPUT_REVERSED)
2625      *         |         |
2626      *    ---------------------
2627      *    | FW_LSTM   BW_LSTM |
2628      *    ---------------------
2629      *         |         |
2630      *      FW_OUT     BW_OUT
2631      *
2632      * An op with cross-linking takes two inputs and feeds them into the RNN
2633      * cells in the following way:
2634      *
2635      *       AUX_INPUT   (AUX_INPUT_REVERSED)
2636      *           |             |
2637      *     INPUT | (INPUT_R'D.)|
2638      *       |   |       |     |
2639      *    -----------------------
2640      *    |  \  /        \    / |
2641      *    | FW_LSTM     BW_LSTM |
2642      *    -----------------------
2643      *         |           |
2644      *      FW_OUT      BW_OUT
2645      *
2646      * The cross-linking mode is enabled iff auxiliary input and auxiliary
2647      * weights are present. While stacking this op on top of itself, this
2648      * allows connecting both forward and backward outputs from the previous
2649      * cell to the next cell's input.
2650      *
2651      * Since NNAPI feature level 4, parallel linking mode is supported. The mode is
2652      * enabled if auxiliary input is present but auxiliary weights are omitted.
2653      * In this case, the cell feeds inputs into the RNN in the following way:
2654      *
2655      *       INPUT (AUX_INPUT_REVERSED)
2656      *         |         |
2657      *    ---------------------
2658      *    | FW_LSTM   BW_LSTM |
2659      *    ---------------------
2660      *         |         |
2661      *      FW_OUT     BW_OUT
2662      *
2663      * While stacking this op on top of itself, this allows connecting both
2664      * forward and backward outputs from the previous cell to the next cell's
2665      * corresponding inputs.
2666      *
2667      * Supported tensor {@link OperandCode}:
2668      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2669      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2670      *
2671      * Supported tensor rank: 3, either time-major or batch-major.
2672      *
2673      * All input and output tensors must be of the same type.
2674      *
2675      * Inputs:
2676      * * 0: The input.
2677      *      A 3-D tensor of shape:
2678      *        If time-major: [max_time, batch_size, input_size]
2679      *        If batch-major: [batch_size, max_time, input_size]
2680      *      where "max_time" is the number of timesteps (sequence length),
2681      *      "batch_size" corresponds to the batching dimension, and
2682      *      "input_size" is the size of the input.
2683      * * 1: The forward input-to-input weights. Optional.
2684      *      A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units”
2685      *      corresponds to the number of forward cell units.
2686      * * 2: The forward input-to-forget weights.
2687      *      A 2-D tensor of shape [fw_num_units, input_size].
2688      * * 3: The forward input-to-cell weights.
2689      *      A 2-D tensor of shape [fw_num_units, input_size].
2690      * * 4: The forward input-to-output weights.
2691      *      A 2-D tensor of shape [fw_num_units, input_size].
2692      * * 5: The forward recurrent-to-input weights. Optional.
2693      *      A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size”
2694      *      corresponds to either the number of cell units (i.e., fw_num_units),
2695      *      or the second dimension of the “fw_projection_weights”, if defined.
2696      * * 6: The forward recurrent-to-forget weights.
2697      *      A 2-D tensor of shape [fw_num_units, fw_output_size].
2698      * * 7: The forward recurrent-to-cell weights.
2699      *      A 2-D tensor of shape [fw_num_units, fw_output_size].
2700      * * 8: The forward recurrent-to-output weights.
2701      *      A 2-D tensor of shape [fw_num_units, fw_output_size].
2702      * * 9: The forward cell-to-input weights. Optional.
2703      *      A 1-D tensor of shape [fw_num_units].
2704      * * 10: The forward cell-to-forget weights. Optional.
2705      *       A 1-D tensor of shape [fw_num_units].
2706      * * 11: The forward cell-to-output weights. Optional.
2707      *       A 1-D tensor of shape [fw_num_units].
2708      * * 12: The forward input gate bias. Optional.
2709      *       A 1-D tensor of shape [fw_num_units].
2710      * * 13: The forward forget gate bias.
2711      *       A 1-D tensor of shape [fw_num_units].
2712      * * 14: The forward cell gate bias.
2713      *       A 1-D tensor of shape [fw_num_units].
2714      * * 15: The forward output gate bias.
2715      *       A 1-D tensor of shape [fw_num_units].
2716      * * 16: The forward projection weights. Optional.
2717      *       A 2-D tensor of shape [fw_output_size, fw_num_units].
2718      * * 17: The forward projection bias. Optional.
2719      *       A 1-D tensor of shape [fw_output_size].
2720      * * 18: The backward input-to-input weights. Optional.
2721      *       A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units”
2722      *       corresponds to the number of backward cell units.
2723      * * 19: The backward input-to-forget weights.
2724      *       A 2-D tensor of shape [bw_num_units, input_size].
2725      * * 20: The backward input-to-cell weights.
2726      *       A 2-D tensor of shape [bw_num_units, input_size].
2727      * * 21: The backward input-to-output weights.
2728      *       A 2-D tensor of shape [bw_num_units, input_size].
2729      * * 22: The backward recurrent-to-input weights. Optional.
2730      *       A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size”
2731      *       corresponds to either the number of cell units (i.e., “bw_num_units”),
2732      *       or the second dimension of the “bw_projection_weights”, if defined.
2733      * * 23: The backward recurrent-to-forget weights.
2734      *       A 2-D tensor of shape [bw_num_units, bw_output_size].
2735      * * 24: The backward recurrent-to-cell weights.
2736      *       A 2-D tensor of shape [bw_num_units, bw_output_size].
2737      * * 25: The backward recurrent-to-output weights.
2738      *       A 2-D tensor of shape [bw_num_units, bw_output_size].
2739      * * 26: The backward cell-to-input weights. Optional.
2740      *       A 1-D tensor of shape [bw_num_units].
2741      * * 27: The backward cell-to-forget weights. Optional.
2742      *       A 1-D tensor of shape [bw_num_units].
2743      * * 28: The backward cell-to-output weights. Optional.
2744      *       A 1-D tensor of shape [bw_num_units].
2745      * * 29: The backward input gate bias. Optional.
2746      *       A 1-D tensor of shape [bw_num_units].
2747      * * 30: The backward forget gate bias.
2748      *       A 1-D tensor of shape [bw_num_units].
2749      * * 31: The backward cell gate bias.
2750      *       A 1-D tensor of shape [bw_num_units].
2751      * * 32: The backward output gate bias.
2752      *       A 1-D tensor of shape [bw_num_units].
2753      * * 33: The backward projection weights. Optional.
2754      *       A 2-D tensor of shape [bw_output_size, bw_num_units].
2755      * * 34: The backward projection bias. Optional.
2756      *       A 1-D tensor of shape [bw_output_size].
2757      * * 35: The forward input activation state.
2758      *       A 2-D tensor of shape [batch_size, bw_output_size].
2759      * * 36: The forward input cell state.
2760      *       A 2-D tensor of shape [batch_size, bw_num_units].
2761      * * 37: The backward input activation state.
2762      *       A 2-D tensor of shape [batch_size, bw_output_size].
2763      * * 38: The backward input cell state.
2764      *       A 2-D tensor of shape [batch_size, bw_num_units].
2765      * * 39: The auxiliary input. Optional.
2766      *       A 3-D tensor of shape [max_time, batch_size, aux_input_size],
2767      *       where “batch_size” corresponds to the batching dimension, and
2768      *       “aux_input_size” is the size of the auxiliary input. Optional. See
2769      *       the docs above for the usage modes explanation.
2770      * * 40: The forward auxiliary input-to-input weights.
2771      *       Optional. See the docs above for the usage modes explanation.
2772      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2773      * * 41: The forward auxiliary input-to-forget weights.
2774      *       Optional. See the docs above for the usage modes explanation.
2775      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2776      * * 42: The forward auxiliary input-to-cell weights.
2777      *       Optional. See the docs above for the usage modes explanation.
2778      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2779      * * 43: The forward auxiliary input-to-output weights.
2780      *       Optional. See the docs above for the usage modes explanation.
2781      *       A 2-D tensor of shape [fw_num_units, aux_input_size].
2782      * * 44: The backward auxiliary input-to-input weights.
2783      *       Optional. See the docs above for the usage modes explanation.
2784      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2785      * * 45: The backward auxiliary input-to-forget weights.
2786      *       Optional. See the docs above for the usage modes explanation.
2787      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2788      * * 46: The backward auxiliary input-to-cell weights.
2789      *       Optional. See the docs above for the usage modes explanation.
2790      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2791      * * 47: The backward auxiliary input-to-output weights.
2792      *       Optional. See the docs above for the usage modes explanation.
2793      *       A 2-D tensor of shape [bw_num_units, aux_input_size].
2794      * * 48: The activation function.
2795      *       A value indicating the activation function:
2796      *       <ul>
2797      *       <li>0: None;
2798      *       <li>1: Relu;
2799      *       <li>3: Relu6;
2800      *       <li>4: Tanh;
2801      *       <li>6: Sigmoid.
2802      *       </ul>
2803      * * 49: The clipping threshold for the cell state, such
2804      *       that values are bound within [-cell_clip, cell_clip]. If set to 0.0
2805      *       then clipping is disabled.
2806      *       If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32},
2807      *       this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
2808      *       otherwise if all the input tensors have the type
2809      *       {@link ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be
2810      *       of type {@link ANEURALNETWORKS_FLOAT16}.
2811      * * 50: The clipping threshold for the output from the
2812      *       projection layer, such that values are bound within
2813      *       [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
2814      *       If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32},
2815      *       this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
2816      *       otherwise if all the input tensors have the type
2817      *       {@link ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be
2818      *       of type {@link ANEURALNETWORKS_FLOAT16}.
2819      * * 51: merge_outputs
2820      *       An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs
2821      *       from forward and backward cells should be merged.
2822      * * 52: time_major
2823      *       An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format
2824      *       of input and output tensors.
2825      * * 53: The forward input layer normalization weights. Optional.
2826      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2827      *       to activation at input gate.
2828      * * 54: The forward forget layer normalization weights. Optional.
2829      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2830      *       to activation at forget gate.
2831      * * 55: The forward cell layer normalization weights. Optional.
2832      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2833      *       to activation at cell gate.
2834      * * 56: The forward output layer normalization weights. Optional.
2835      *       A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
2836      *       to activation at output gate.
2837      * * 57: The backward input layer normalization weights. Optional.
2838      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2839      *       to activation at input gate.
2840      * * 58: The backward forget layer normalization weights. Optional.
2841      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2842      *       to activation at forget gate.
2843      * * 59: The backward cell layer normalization weights. Optional.
2844      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2845      *       to activation at cell gate.
2846      * * 60: The backward output layer normalization weights. Optional.
2847      *       A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
2848      *       to activation at output gate.
2849      *
2850      * Outputs:
2851      * * 0: The forward output.
2852      *      A 3-D tensor of shape:
2853      *        If time-major and not merge_outputs:
2854      *          [max_time, batch_size, fw_output_size]
2855      *        If time-major and merge_outputs:
2856      *          [max_time, batch_size, fw_output_size + bw_output_size]
2857      *        If batch-major and not merge_outputs:
2858      *          [batch_size, max_time, fw_output_size]
2859      *        If batch-major and merge_outputs:
2860      *          [batch_size, max_time, fw_output_size + bw_output_size]
2861      * * 1: The backward output.  Unused if merge_outputs is true.
2862      *      A 3-D tensor of shape:
2863      *        If time-major: [max_time, batch_size, bw_output_size]
2864      *        If batch-major: [batch_size, max_time, bw_output_size]
2865      * * 2: The forward activation state output.
2866      *      A 2-D tensor of shape [batch_size, fw_output_size] containing an
2867      *      activation state from the last time step in the sequence. This
2868      *      output is optional and can be omitted. If this output is present
2869      *      then outputs 3-5 must be present as well.
2870      *      Available since NNAPI feature level 4.
2871      * * 3: The forward cell state output.
2872      *      A tensor of shape [batch_size, fw_cell_size] containing a cell state
2873      *      from the last time step in the sequence. This output is optional
2874      *      and can be omitted. If this output is present
2875      *      then outputs 2, 4, 5 must be present as well.
2876      *      Available since NNAPI feature level 4.
2877      * * 4: The backward activation state output.
2878      *      A 2-D tensor of shape [batch_size, bw_output_size] containing an
2879      *      activation state from the last time step in the sequence. This
2880      *      output is optional and can be omitted. If this output is present
2881      *      then outputs 2, 3, 5 must be present as well.
2882      *      Available since NNAPI feature level 4.
2883      * * 5: The backward cell state output.
2884      *      A tensor of shape [batch_size, bw_cell_size] containing a cell state
2885      *      from the last time step in the sequence. This output is optional
2886      *      and can be omitted. If this output is present
2887      *      then outputs 2-4 must be present as well.
2888      *      Available since NNAPI feature level 4.
2889      *
2890      * Available since NNAPI feature level 3.
2891      *
2892      * Important: As of NNAPI feature level 3, there is no way to get the output state tensors out
2893      * and NNAPI does not maintain internal states. This operator does not support the usage pattern
2894      * in which multiple cells are chained and state tensors are propagated.
2895      */
2896     ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM = 42,
2897 
2898     /**
2899      * A recurrent neural network layer that applies a basic RNN cell to a
2900      * sequence of inputs in forward and backward directions.
2901      *
2902      * This Op unrolls the input along the sequence dimension, and implements
2903      * the following operation for each element in the sequence s =
2904      * 1...sequence_length:
2905      *   fw_outputs[s] = fw_state = activation(inputs[s] * fw_input_weights’ +
2906      *          fw_state * fw_recurrent_weights’ + fw_bias)
2907      *
2908      * And for each element in sequence t = sequence_length : 1
2909      *   bw_outputs[t] = bw_state = activation(inputs[t] * bw_input_weights’ +
2910      *          bw_state * bw_recurrent_weights’ + bw_bias)
2911      *
2912      * Where:
2913      * * “{fw,bw}_input_weights” is a weight matrix that multiplies the inputs;
2914      * * “{fw,bw}_recurrent_weights” is a weight matrix that multiplies the
2915      *    current “state” which itself is the output from the previous time step
2916      *    computation;
2917      * * “{fw,bw}_bias” is a bias vector (added to each output vector in the
2918      *    batch);
2919      * * “activation” is the function passed as the “fused_activation_function”
2920      *   argument (if not “NONE”).
2921      *
2922      * The op supports cross-linking via an auxiliary input. Regular cell feeds
2923      * one input into the two RNN cells in the following way:
2924      *
2925      *       INPUT  (INPUT_REVERSED)
2926      *         |         |
2927      *    ---------------------
2928      *    | FW_RNN     BW_RNN |
2929      *    ---------------------
2930      *         |         |
2931      *      FW_OUT     BW_OUT
2932      *
2933      * An op with cross-linking takes two inputs and feeds them into the RNN
2934      * cells in the following way:
2935      *
2936      *       AUX_INPUT   (AUX_INPUT_REVERSED)
2937      *           |             |
2938      *     INPUT | (INPUT_R'D.)|
2939      *       |   |       |     |
2940      *    -----------------------
2941      *    |  \  /        \    / |
2942      *    | FW_RNN       BW_RNN |
2943      *    -----------------------
2944      *         |           |
2945      *      FW_OUT      BW_OUT
2946      *
2947      * The cross-linking mode is enabled iff auxiliary input and auxiliary
2948      * weights are present. While stacking this op on top of itself, this
2949      * allows connecting both forward and backward outputs from the previous
2950      * cell to the next cell's input.
2951      *
2952      * Since NNAPI feature level 4, parallel linking mode is supported. The mode is
2953      * enabled if auxiliary input is present but auxiliary weights are omitted.
2954      * In this case, the cell feeds inputs into the RNN in the following way:
2955      *
2956      *       INPUT (AUX_INPUT_REVERSED)
2957      *         |         |
2958      *    ---------------------
2959      *    | FW_RNN     BW_RNN |
2960      *    ---------------------
2961      *         |         |
2962      *      FW_OUT     BW_OUT
2963      *
2964      * While stacking this op on top of itself, this allows connecting both
2965      * forward and backward outputs from the previous cell to the next cell's
2966      * corresponding inputs.
2967      *
2968      * Supported tensor {@link OperandCode}:
2969      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
2970      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
2971      *
2972      * The input tensors must all be the same type.
2973      *
2974      * Inputs:
2975      * * 0: input.
2976      *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
2977      *      it is set to true, then the input has a shape [maxTime, batchSize,
2978      *      inputSize], otherwise the input has a shape [batchSize, maxTime,
2979      *      inputSize].
2980      * * 1: fwWeights.
2981      *      A 2-D tensor of shape [fwNumUnits, inputSize].
2982      * * 2: fwRecurrentWeights.
2983      *      A 2-D tensor of shape [fwNumUnits, fwNumUnits].
2984      * * 3: fwBias.
2985      *      A 1-D tensor of shape [fwNumUnits].
2986      * * 4: fwHiddenState.
2987      *      A 2-D tensor of shape [batchSize, fwNumUnits]. Specifies a hidden
2988      *      state input for the first time step of the computation.
2989      * * 5: bwWeights.
2990      *      A 2-D tensor of shape [bwNumUnits, inputSize].
2991      * * 6: bwRecurrentWeights.
2992      *      A 2-D tensor of shape [bwNumUnits, bwNumUnits].
2993      * * 7: bwBias.
2994      *      A 1-D tensor of shape [bwNumUnits].
2995      * * 8: bwHiddenState
2996      *      A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden
2997      *      state input for the first time step of the computation.
2998      * * 9: auxInput.
2999      *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
3000      *      it is set to true, then the input has a shape [maxTime, batchSize,
3001      *      auxInputSize], otherwise the input has a shape [batchSize, maxTime,
3002      *      auxInputSize]. Can be omitted. See the docs above for the usage
3003      *      modes explanation.
3004      * * 10:fwAuxWeights.
3005      *      A 2-D tensor of shape [fwNumUnits, auxInputSize]. Can be omitted.
3006      *      See the docs above for the usage modes explanation.
3007      * * 11:bwAuxWeights.
3008      *      A 2-D tensor of shape [bwNumUnits, auxInputSize]. Can be omitted.
3009      *      See the docs above for the usage modes explanation.
3010      * * 12:fusedActivationFunction.
3011      *      A {@link FuseCode} value indicating the activation function. If
3012      *      “NONE” is specified then it results in a linear activation.
3013      * * 13:timeMajor
3014      *      An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format
3015      *      of input and output tensors.
3016      * * 14:mergeOutputs
3017      *      An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs
3018      *      from forward and backward cells are separate (if set to false) or
3019      *      concatenated (if set to true).
3020      * Outputs:
3021      * * 0: fwOutput.
3022      *      A 3-D tensor. The first two dimensions of the shape are defined by
3023      *      the input 6 (timeMajor) and the third dimension is defined by the
3024      *      input 14 (mergeOutputs). If timeMajor is set to true, then the first
3025      *      two dimensions are [maxTime, batchSize], otherwise they are set to
3026      *      [batchSize, maxTime]. If mergeOutputs is set to true, then the third
3027      *      dimension is equal to (fwNumUnits + bwNumUnits), otherwise it is set
3028      *      to fwNumUnits.
3029      * * 1: bwOutput.
3030      *      A 3-D tensor. If the input 14 (mergeOutputs) is set to true, then
3031      *      this tensor is not produced. The shape is defined by the input 6
3032      *      (timeMajor). If it is set to true, then the shape is set to
3033      *      [maxTime, batchSize, bwNumUnits], otherwise the shape is set to
3034      *      [batchSize, maxTime, bwNumUnits].
3035      * * 2: The forward hidden state output.
3036      *      A 2-D tensor of shape [batchSize, fwNumUnits] containing a hidden
3037      *      state from the last time step in the sequence. This output is
3038      *      optional and can be omitted. If this output is present then output
3039      *      3 must be present as well.
3040      *      Available since NNAPI feature level 4.
3041      * * 3: The backward hidden state output.
3042      *      A 2-D tensor of shape [batchSize, bwNumUnits] containing a hidden
3043      *      state from the last time step in the sequence. This output is
3044      *      optional and can be omitted. If this output is present then output
3045      *      2 must be present as well.
3046      *      Available since NNAPI feature level 4.
3047      *
3048      * Available since NNAPI feature level 3.
3049      *
3050      * Important: As of NNAPI feature level 3, there is no way to get the output state tensors out
3051      * and NNAPI does not maintain internal states. This operator does not support the usage pattern
3052      * in which multiple cells are chained and state tensors are propagated.
3053      */
3054     ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN = 43,
3055 
3056     /**
3057      * Greedily selects a subset of bounding boxes in descending order of score.
3058      *
3059      * This op applies NMS algorithm to each class. In each loop of execution,
3060      * the box with maximum score gets selected and removed from the pending set.
3061      * The scores of the rest of boxes are lowered according to the
3062      * intersection-over-union (IOU) overlapping with the previously selected
3063      * boxes and a specified NMS kernel method. Any boxes with score less
3064      * than a threshold are removed from the pending set.
3065      *
3066      * Three NMS kernels are supported:
3067      * * Hard:     score_new = score_old * (1 if IoU < threshold else 0)
3068      * * Linear:   score_new = score_old * (1 if IoU < threshold else 1 - IoU)
3069      * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma)
3070      *
3071      * Axis-aligned bounding boxes are represented by its upper-left corner
3072      * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
3073      * bounding box should satisfy x1 <= x2 and y1 <= y2.
3074      *
3075      * Supported tensor {@link OperandCode}:
3076      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3077      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3078      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3079      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3080      *
3081      * Inputs:
3082      * * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score
3083      *      of each bounding box proposal. The boxes are grouped by batches in the
3084      *      first dimension. Zero num_rois is supported for this tensor.
3085      * * 1: A 2-D Tensor specifying the bounding boxes of shape
3086      *      [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2].
3087      *      The boxes are grouped by batches in the first dimension. The sequential
3088      *      order of the boxes corresponds with input0. For input0 of type
3089      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should be of
3090      *      {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
3091      *      scale of 0.125.
3092      *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
3093      *      this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
3094      *      with zeroPoint of -128 and scale of 0.125.
3095      *      Zero num_rois is supported for this tensor.
3096      * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3097      *      [num_rois], specifying the batch index of each box. Boxes with
3098      *      the same batch index are grouped together.
3099      * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, score_threshold. Boxes
3100      *      with scores lower than the threshold are filtered before sending
3101      *      to the NMS algorithm.
3102      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum
3103      *      number of selected bounding boxes for each image. Set to a negative
3104      *      value for unlimited number of output bounding boxes.
3105      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the NMS
3106      *      kernel method, options are 0:hard, 1:linear, 2:gaussian.
3107      * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU
3108      *      threshold in hard and linear NMS kernel. This field is ignored if
3109      *      gaussian kernel is selected.
3110      * * 7: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the sigma in
3111      *      gaussian NMS kernel. This field is ignored if gaussian kernel is
3112      *      not selected.
3113      * * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, nms_score_threshold.
3114      *      Boxes with scores lower than the threshold are dropped during the
3115      *      score updating phase in soft NMS.
3116      *
3117      * Outputs:
3118      * * 0: A 1-D Tensor of the same {@link OperandCode} as input0, with shape
3119      *      [num_output_rois], specifying the score of each output box. The boxes
3120      *      are grouped by batches, but the sequential order in each batch is not
3121      *      guaranteed.
3122      *      For type of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3123      *      or {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
3124      *      the scale and zero point must be the same as input0.
3125      * * 1: A 2-D Tensor of the same {@link OperandCode} as input1, with shape
3126      *      [num_output_rois, 4], specifying the coordinates of each
3127      *      output bounding box with the same format as input1. The sequential
3128      *      order of the boxes corresponds with output0. For type of
3129      *      {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the scale must be
3130      *      0.125 and the zero point must be 0.
3131      * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3132      *      [num_output_rois], specifying the class of each output box. The
3133      *      sequential order of the boxes corresponds with output0.
3134      * * 3: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3135      *      [num_output_rois], specifying the batch index of each box. Boxes
3136      *      with the same batch index are grouped together.
3137      *
3138      * Available since NNAPI feature level 3.
3139      */
3140     ANEURALNETWORKS_BOX_WITH_NMS_LIMIT = 44,
3141 
3142     /**
3143      * Casts a tensor to a type.
3144      *
3145      * This operation ignores the scale and zeroPoint of quantized tensors,
3146      * e.g. it treats a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} input
3147      * as a tensor of uint8 values.
3148      *
3149      * Supported tensor {@link OperandCode}:
3150      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3151      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3152      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3153      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3154      * Since NNAPI feature level 4, casting tensors of the following
3155      * {@link OperandCode} to the same {@link OperandCode} is supported:
3156      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3157      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3158      * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}
3159      * * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
3160      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
3161      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
3162      *
3163      * Supported tensor rank: from 1
3164      *
3165      * Inputs:
3166      * * 0: A tensor.
3167      *
3168      * Outputs:
3169      * * 0: A tensor with the same shape as input0.
3170      *
3171      * Available since NNAPI feature level 3.
3172      */
3173     ANEURALNETWORKS_CAST = 45,
3174 
3175     /**
3176      * Shuffle the channels of the input tensor.
3177      *
3178      * Given an input tensor and an integer value of num_groups, CHANNEL_SHUFFLE
3179      * divides the channel dimension into num_groups groups, and reorganizes the
3180      * channels by grouping channels with the same index in each group.
3181      *
3182      * Along the channel dimension, the output is calculated using this formula:
3183      *
3184      *     output_channel[k * num_groups + g] = input_channel[g * group_size + k]
3185      *
3186      * where group_size = num_channels / num_groups
3187      *
3188      * The number of channels must be divisible by num_groups.
3189      *
3190      * Supported tensor {@link OperandCode}:
3191      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3192      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3193      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3194      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3195      *
3196      * Supported tensor rank: up to 4
3197      *
3198      * Inputs:
3199      * * 0: An n-D tensor, specifying the tensor to be shuffled.
3200      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
3201      *      groups.
3202      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the dimension
3203      *      channel shuffle would be performed on. Negative index is used to
3204      *      specify axis from the end (e.g. -1 for the last axis). Must be in
3205      *      the range [-n, n).
3206      *
3207      * Outputs:
3208      * * 0: A tensor of the same {@link OperandCode} and same shape as input0.
3209      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3210      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3211      *      the scale and zeroPoint must be the same as input0.
3212      *
3213      * Available since NNAPI feature level 3.
3214      */
3215     ANEURALNETWORKS_CHANNEL_SHUFFLE = 46,
3216 
3217     /**
3218      * Apply postprocessing steps to bounding box detections.
3219      *
3220      * Bounding box detections are generated by applying transformation on a set
3221      * of predefined anchors with the bounding box deltas from bounding box
3222      * regression. A final step of hard NMS is applied to limit the number of
3223      * returned boxes.
3224      *
3225      * Supported tensor {@link OperandCode}:
3226      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3227      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3228      *
3229      * Inputs:
3230      * * 0: A 3-D Tensor of shape [batches, num_anchors, num_classes], specifying
3231      *      the score of each anchor with each class. Class 0 for each
3232      *      [batches, num_anchors, 0] is background and will be ignored.
3233      * * 1: A 3-D Tensor of shape [batches, num_anchors, length_box_encoding], with
3234      *      the first four values in length_box_encoding specifying the bounding
3235      *      box deltas. The box deltas are encoded in the order of [dy, dx, dh, dw],
3236      *      where dy and dx is the linear-scale relative correction factor for the
3237      *      center position of the bounding box with respect to the width and height,
3238      *      dh and dw is the log-scale relative correction factor for the width and
3239      *      height. All the entries in length_box_encoding beyond the first four
3240      *      values are ignored in this operation.
3241      * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
3242      *      predefined anchor, with format [ctr_y, ctr_x, h, w], where ctr_y and
3243      *      ctr_x are the center position of the box, and h and w are the height
3244      *      and the width.
3245      * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
3246      *      factor for dy in bounding box deltas.
3247      * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
3248      *      factor for dx in bounding box deltas.
3249      * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
3250      *      factor for dh in bounding box deltas.
3251      * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
3252      *      factor for dw in bounding box deltas.
3253      * * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to use regular
3254      *      multi-class NMS algorithm that does NMS separately for each class,
3255      *      set to false for a faster algorithm that only does one single NMS
3256      *      using the highest class score.
3257      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, max_num_detections, specifying
3258      *      the maximum number of boxes for the output. Boxes with the lowest
3259      *      scores are discarded to meet the limit.
3260      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is
3261      *      set to false, specifying the maximum number of classes per detection.
3262      * * 10: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is
3263      *       set to true, specifying the maximum number of detections when
3264      *       applying NMS algorithm for each single class.
3265      * * 11: A scalar, score_threshold. Boxes with scores lower than the
3266      *       threshold are filtered before sending to the NMS algorithm. The
3267      *       scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of
3268      *       {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3269      *       {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3270      *       {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3271      * * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar
3272      *       must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of
3273      *       {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3274      *       {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3275      *       {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3276      * * 13: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to include
3277      *       background class in the list of label map for the output, set
3278      *       to false to not include the background. When the background
3279      *       class is included, it has label 0 and the output classes start
3280      *       at 1 in the label map, otherwise, the output classes start at 0.
3281      *
3282      * Outputs:
3283      * * 0: A 2-D tensor of the same {@link OperandCode} as input0, with shape
3284      *      [batches, max_num_detections], specifying the score of each output
3285      *      detections.
3286      * * 1: A 3-D tensor of shape [batches, max_num_detections, 4], specifying the
3287      *      coordinates of each output bounding box, with format
3288      *      [y1, x1, y2, x2].
3289      * * 2: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3290      *      [batches, max_num_detections], specifying the class label for each
3291      *      output detection.
3292      * * 3: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape [batches],
3293      *      specifying the number of valid output detections for each batch.
3294      *
3295      * Available since NNAPI feature level 3.
3296      */
3297     ANEURALNETWORKS_DETECTION_POSTPROCESSING = 47,
3298 
3299     /**
3300      * For input tensors x and y, computes x == y elementwise.
3301      *
3302      * Supported tensor {@link OperandCode}:
3303      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3304      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3305      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3306      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3307      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3308      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3309      *
3310      * Supported tensor rank: from 1
3311      *
3312      * This operation supports broadcasting.
3313      *
3314      * Inputs:
3315      * * 0: A tensor.
3316      * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3317      *      with input0.
3318      *
3319      * Outputs:
3320      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3321      *
3322      * Available since NNAPI feature level 3.
3323      */
3324     ANEURALNETWORKS_EQUAL = 48,
3325 
3326     /**
3327      * Computes exponential of x element-wise.
3328      *
3329      * Supported tensor {@link OperandCode}:
3330      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3331      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3332      *
3333      * Supported tensor rank: from 1.
3334      *
3335      * Inputs:
3336      * * 0: A tensor.
3337      *
3338      * Outputs:
3339      * * 0: The output tensor of same shape as input0.
3340      *
3341      * Available since NNAPI feature level 3.
3342      */
3343     ANEURALNETWORKS_EXP = 49,
3344 
3345     /**
3346      * Inserts a dimension of 1 into a tensor's shape.
3347      *
3348      * Given a tensor input, this operation inserts a dimension of 1 at the
3349      * given dimension index of input's shape. The dimension index starts at
3350      * zero; if you specify a negative dimension index, it is counted backward
3351      * from the end.
3352      *
3353      * Supported tensor {@link OperandCode}:
3354      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3355      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3356      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3357      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3358      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3359      *
3360      * Supported tensor rank: from 1
3361      *
3362      * Inputs:
3363      * * 0: An n-D tensor.
3364      * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the dimension
3365      *      index to expand. Must be in the range [-(n + 1), (n + 1)).
3366      *
3367      * Outputs:
3368      * * 0: An (n + 1)-D tensor with the same {@link OperandCode} and data as
3369      *      input0.
3370      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3371      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3372      *      the scale and zeroPoint must be the same as input0.
3373      *
3374      * Available since NNAPI feature level 3.
3375      */
3376     ANEURALNETWORKS_EXPAND_DIMS = 50,
3377 
3378     /**
3379      * Gathers values along an axis.
3380      *
3381      * Produces an output tensor with shape
3382      *     input0.dimension[:axis] + indices.dimension + input0.dimension[axis + 1:]
3383      * where:
3384      *     # Vector indices (output is rank(input0)).
3385      *     output[a_0, ..., a_n, i, b_0, ..., b_n] =
3386      *       input0[a_0, ..., a_n, indices[i], b_0, ..., b_n]
3387      *
3388      *     # Higher rank indices (output is rank(input0) + rank(indices) - 1).
3389      *     output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
3390      *       input0[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
3391      *
3392      * Supported tensor {@link OperandCode}:
3393      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3394      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3395      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3396      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3397      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3398      *
3399      * Supported tensor rank: from 1
3400      *
3401      * Inputs:
3402      * * 0: An n-D tensor from which to gather values.
3403      * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis.
3404      *      Negative index is used to specify axis from the end
3405      *      (e.g. -1 for the last axis). Must be in the range [-n, n).
3406      * * 2: A k-D tensor {@link ANEURALNETWORKS_TENSOR_INT32} of indices.
3407      *      The values must be in the bounds of the corresponding dimensions
3408      *      of input0.
3409      *
3410      * Outputs:
3411      * * 0: An (n + k - 1)-D tensor with the same {@link OperandCode} as input0.
3412      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3413      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3414      *      the scale and zeroPoint must be the same as input0.
3415      *
3416      * Available since NNAPI feature level 3.
3417      */
3418     ANEURALNETWORKS_GATHER = 51,
3419 
3420     /**
3421      * Generate axis-aligned bounding box proposals.
3422      *
3423      * Bounding box proposals are generated by applying transformation on a set
3424      * of predefined anchors with the bounding box deltas from bounding box
3425      * regression. A final step of hard NMS is applied to limit the number of
3426      * returned boxes.
3427      *
3428      * Axis-aligned bounding boxes are represented by its upper-left corner
3429      * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
3430      * bounding box should satisfy x1 <= x2 and y1 <= y2.
3431      *
3432      * Supported tensor {@link OperandCode}:
3433      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3434      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3435      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3436      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3437      *
3438      * Inputs:
3439      * * 0: A 4-D Tensor specifying the score of each anchor at each
3440      *      location. With "NHWC" data layout, the tensor shape is
3441      *      [batches, height, width, num_anchors]. With "NCHW" data layout,
3442      *      the tensor shape is [batches, num_anchors, height, width].
3443      * * 1: A 4-D Tensor specifying the bounding box deltas. With "NHWC" data
3444      *      layout, the tensor shape is [batches, height, width, num_anchors * 4].
3445      *      With "NCHW" data layout, the tensor shape is
3446      *      [batches, num_anchors * 4, height, width]. The box deltas are encoded
3447      *      in the order of [dx, dy, dw, dh], where dx and dy is the linear-scale
3448      *      relative correction factor for the center position of the bounding box
3449      *      with respect to the width and height, dw and dh is the log-scale
3450      *      relative correction factor for the width and height. The last
3451      *      dimensions is the channel dimension.
3452      * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
3453      *      predefined anchor, with format [x1, y1, x2, y2]. For input0 of type
3454      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
3455      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this tensor should be of
3456      *      {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with scale of 0.125.
3457      * * 3: A 2-D Tensor of shape [batches, 2], specifying the size of
3458      *      each image in the batch, with format [image_height, image_width].
3459      *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
3460      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this
3461      *      tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with
3462      *      scale of 0.125.
3463      * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
3464      *      from the height of original image to the height of feature map.
3465      * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
3466      *      from the width of original image to the width of feature map.
3467      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum
3468      *      number of boxes before going into the hard NMS algorithm. Boxes
3469      *      with the lowest scores are discarded to meet the limit. Set to
3470      *      a non-positive value for unlimited number.
3471      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum
3472      *      number of boxes returning from the hard NMS algorithm. Boxes
3473      *      with the lowest scores are discarded to meet the limit. Set to
3474      *      a non-positive value for unlimited number.
3475      * * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU
3476      *      threshold for hard NMS.
3477      * * 9: An {@link ANEURALNETWORKS_FLOAT32} scalar, min_size. Boxes with
3478      *      height or width lower than the absolute threshold are filtered out.
3479      * * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3480      *       NCHW data layout for input0 and input1. Set to false for NHWC.
3481      *
3482      * Outputs:
3483      * * 0: A tensor of the same {@link OperandCode} as input0, of shape
3484      *      [num_output_rois], specifying the score of each output box.
3485      *      The boxes are grouped by batches, but the sequential order in
3486      *      each batch is not guaranteed. For type of
3487      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
3488      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, the scale and zero
3489      *      point must be the same as input0.
3490      * * 1: A tensor of the same {@link OperandCode} as input3, of shape
3491      *      [num_output_rois, 4], specifying the coordinates of each output
3492      *      bounding box for each class, with format [x1, y1, x2, y2].
3493      *      The sequential order of the boxes corresponds with output0.
3494      *      For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the
3495      *      scale must be 0.125 and the zero point must be 0.
3496      * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
3497      *      [num_output_rois], specifying the batch index of each box. Boxes
3498      *      with the same batch index are grouped together.
3499      *
3500      * Available since NNAPI feature level 3.
3501      */
3502     ANEURALNETWORKS_GENERATE_PROPOSALS = 52,
3503 
3504     /**
3505      * For input tensors x and y, computes x > y elementwise.
3506      *
3507      * Supported tensor {@link OperandCode}:
3508      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3509      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3510      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3511      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3512      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3513      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3514      *
3515      * Supported tensor rank: from 1
3516      *
3517      * This operation supports broadcasting.
3518      *
3519      * Inputs:
3520      * * 0: A tensor.
3521      * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3522      *      with input0.
3523      *
3524      * Outputs:
3525      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3526      *
3527      * Available since NNAPI feature level 3.
3528      */
3529     ANEURALNETWORKS_GREATER = 53,
3530     /**
3531      * For input tensors x and y, computes x >= y elementwise.
3532      *
3533      * Supported tensor {@link OperandCode}:
3534      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3535      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3536      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3537      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3538      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3539      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3540      *
3541      * Supported tensor rank: from 1
3542      *
3543      * This operation supports broadcasting.
3544      *
3545      * Inputs:
3546      * * 0: A tensor.
3547      * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3548      *      with input0.
3549      *
3550      * Outputs:
3551      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3552      *
3553      * Available since NNAPI feature level 3.
3554      */
3555     ANEURALNETWORKS_GREATER_EQUAL = 54,
3556 
3557     /**
3558      * Performs a grouped 2-D convolution operation.
3559      *
3560      * Given an input tensor of shape [batches, height, width, depth_in] and a
3561      * filter tensor of shape [depth_out, filter_height, filter_width, depth_group]
3562      * containing depth_out convolutional filters of depth depth_group, GROUPED_CONV
3563      * applies a group of different filters to each input channel group, then
3564      * concatenates the results together.
3565      *
3566      * Specifically, the input channels are divided into num_groups groups, each with
3567      * depth depth_group, i.e. depth_in = num_groups * depth_group. The convolutional
3568      * filters are also divided into num_groups groups, i.e. depth_out is divisible
3569      * by num_groups. GROUPED_CONV applies each group of filters to the corresponding
3570      * input channel group, and the results are concatenated together.
3571      *
3572      * The output dimensions are functions of the filter dimensions, stride, and
3573      * padding.
3574      *
3575      * The values in the output tensor are computed as:
3576      *
3577      *     output[b, i, j, g * channel_multiplier + q] =
3578      *         sum_{di, dj, dk} (
3579      *             input[b, strides[1] * i + di, strides[2] * j + dj,
3580      *                   g * depth_group + dk] *
3581      *             filter[g * channel_multiplier + q, di, dj, dk]
3582      *         ) + bias[channel]
3583      *
3584      * where channel_multiplier = depth_out / num_groups
3585      *
3586      * Supported tensor {@link OperandCode} configurations:
3587      * * 16 bit floating point:
3588      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
3589      *
3590      * * 32 bit floating point:
3591      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
3592      *
3593      * * Quantized:
3594      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
3595      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
3596      * * * input.scale * filter.scale).
3597      *
3598      * * Quantized signed (since NNAPI feature level 4):
3599      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
3600      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
3601      * * * input.scale * filter.scale).
3602      *
3603      * * Quantized with symmetric per channel quantization for the filter:
3604      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
3605      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
3606      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
3607      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
3608      *
3609      * * Quantized signed with filter symmetric per channel quantization
3610      *   (since NNAPI feature level 4):
3611      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
3612      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
3613      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
3614      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
3615      *
3616      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
3617      * With the default data layout NHWC, the data is stored in the order of:
3618      * [batch, height, width, channels]. Alternatively, the data layout could
3619      * be NCHW, the data storage order of: [batch, channels, height, width].
3620      *
3621      * Both explicit padding and implicit padding are supported.
3622      *
3623      * Inputs (explicit padding):
3624      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
3625      *      specifying the input, where depth_in = num_groups * depth_group.
3626      * * 1: A 4-D tensor, of shape
3627      *      [depth_out, filter_height, filter_width, depth_group], specifying
3628      *      the filter, where depth_out must be divisible by num_groups.  For
3629      *      tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
3630      *      the channel dimension (channelDim at
3631      *      {@link ANeuralNetworksSymmPerChannelQuantParams}) must be set to 0.
3632      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
3633      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
3634      *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same type.
3635      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3636      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
3637      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
3638      *      of 0 and bias_scale == input_scale * filter_scale. For filter tensor
3639      *      of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
3640      *      should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of
3641      *      0 and bias_scale of 0. The actual scale of each value 'i' is equal to
3642      *      bias_scale[i] = input_scale * filter_scale[i].
3643      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
3644      *      the left, in the ‘width’ dimension.
3645      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
3646      *      the right, in the ‘width’ dimension.
3647      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
3648      *      the top, in the ‘height’ dimension.
3649      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
3650      *      the bottom, in the ‘height’ dimension.
3651      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
3652      *      walking through input in the ‘width’ dimension.
3653      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
3654      *      walking through input in the ‘height’ dimension.
3655      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
3656      *      groups.
3657      * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
3658      *       {@link FuseCode} values. Specifies the activation to
3659      *       invoke on the result.
3660      * * 11: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3661      *       NCHW data layout for input0 and output0. Set to false for NHWC.
3662      *
3663      * Inputs (implicit padding):
3664      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
3665      *      specifying the input, where depth_in = num_groups * depth_group.
3666      * * 1: A 4-D tensor, of shape
3667      *      [depth_out, filter_height, filter_width, depth_group], specifying
3668      *      the filter, where depth_out must be divisible by num_groups.  For
3669      *      tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
3670      *      the channel dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim)
3671      *      must be set to 0.
3672      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
3673      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
3674      *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same type.
3676      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3677      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
3678      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
3679      *      of 0 and bias_scale == input_scale * filter_scale. For filter tensor
3680      *      of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
3681      *      should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of
3682      *      0 and bias_scale of 0. The actual scale of each value 'i' is equal to
3683      *      bias_scale[i] = input_scale * filter_scale[i].
3684      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
3685      *      padding scheme, has to be one of the
3686      *      {@link PaddingCode} values.
3687      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
3688      *      walking through input in the ‘width’ dimension.
3689      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
3690      *      walking through input in the ‘height’ dimension.
3691      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
3692      *      groups.
3693      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
3694      *      {@link FuseCode} values. Specifies the activation to
3695      *      invoke on the result.
3696      * * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3697      *      NCHW data layout for input0 and output0. Set to false for NHWC.
3698      *
3699      * Outputs:
3700      * * 0: The output 4-D tensor, of shape
3701      *      [batches, out_height, out_width, depth_out].
3702      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
3703      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3704      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
3705      *
3706      * Available since NNAPI feature level 3.
3707      */
3708     ANEURALNETWORKS_GROUPED_CONV_2D = 55,
3709 
3710     /**
3711      * Localize the maximum keypoints from heatmaps.
3712      *
3713      * This operation approximates the accurate maximum keypoint scores and
3714      * indices after bicubic upscaling by using Taylor expansion up to the
3715      * quadratic term.
3716      *
3717      * The bounding box is represented by its upper-left corner coordinate
3718      * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
3719      * A valid bounding box should satisfy x1 <= x2 and y1 <= y2.
3720      *
3721      * Supported tensor {@link OperandCode}:
3722      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3723      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3724      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3725      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3726      *
3727      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
3728      * With the default data layout NHWC, the data is stored in the order of:
3729      * [batch, height, width, channels]. Alternatively, the data layout could
3730      * be NCHW, the data storage order of: [batch, channels, height, width].
3731      *
3732      * Inputs:
3733      * * 0: A 4-D Tensor of shape
3734      *      [num_boxes, heatmap_size, heatmap_size, num_keypoints],
3735      *      specifying the heatmaps, the height and width of heatmaps should
3736      *      be the same, and must be greater than or equal to 2.
3737      * * 1: A 2-D Tensor of shape [num_boxes, 4], specifying the bounding boxes,
3738      *      each with format [x1, y1, x2, y2]. For input0 of type
3739      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should
3740      *      be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint
3741      *      of 0 and scale of 0.125.
3742      *      For input0 of type
3743      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this tensor
3744      *      should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with
3745      *      zeroPoint of -128 and scale of 0.125.
3746      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3747      *      NCHW data layout for input0. Set to false for NHWC.
3748      *
3749      * Outputs:
3750      * * 0: A tensor of the same {@link OperandCode} as input0, with shape
3751      *      [num_boxes, num_keypoints], specifying score of the keypoints.
3752      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
3753      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
3754      *      the scale and zeroPoint can be different from input0 scale and zeroPoint.
3755      * * 1: A tensor of the same {@link OperandCode} as input1, with shape
3756      *      [num_boxes, num_keypoints, 2], specifying the location of
3757      *      the keypoints, the second dimension is organized as
3758      *      [keypoint_x, keypoint_y].
3759      *      For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the
3760      *      scale must be 0.125 and the zero point must be 0.
3761      *
3762      * Available since NNAPI feature level 3.
3763      */
3764     ANEURALNETWORKS_HEATMAP_MAX_KEYPOINT = 56,
3765 
3766     /**
3767      * Applies instance normalization to the input tensor.
3768      *
3769      * The values in the output tensor are computed as:
3770      *
3771      *     output[b, h, w, c] =
3772      *         (input[b, h, w, c] - mean[b, c]) * gamma /
3773      *         sqrt(var[b, c] + epsilon) + beta
3774      *
3775      * Where the mean and variance are computed across the spatial dimensions:
3776      *
3777      *     mean[b, c] =
3778      *         sum_{h, w}(input[b, h, w, c]) / sum(1)
3779      *
3780      *     var[b, c] =
3781      *         sum_{h, w}(pow(input[b, h, w, c] - mean[b, c], 2)) / sum(1)
3782      *
3783      * Supported tensor {@link OperandCode}:
3784      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3785      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3786      *
3787      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
3788      * With the default data layout NHWC, the data is stored in the order of:
3789      * [batch, height, width, channels]. Alternatively, the data layout could
3790      * be NCHW, the data storage order of: [batch, channels, height, width].
3791      *
3792      * Inputs:
3793      * * 0: An n-D tensor, specifying the tensor to be normalized.
3794      * * 1: A scalar, specifying gamma, the scale applied to the normalized
3795      *      tensor. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if
3796      *      input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3797      *      {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3798      *      {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3799      * * 2: A scalar, specifying beta, the offset applied to the normalized
3800      *      tensor. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if
3801      *      input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3802      *      {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3803      *      {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3804      * * 3: A scalar, specifying epsilon, the small value added to variance to
3805      *      avoid dividing by zero. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if
3806      *      input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
3807      *      {@link ANEURALNETWORKS_FLOAT32} if input0 is of
3808      *      {@link ANEURALNETWORKS_TENSOR_FLOAT32}.
3809      * * 4: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
3810      *      NCHW data layout for input0 and output0. Set to false for NHWC.
3811      *
3812      * Outputs:
3813      * * 0: A tensor of the same {@link OperandCode} and same shape as input0.
3814      *
3815      * Available since NNAPI feature level 3.
3816      */
3817     ANEURALNETWORKS_INSTANCE_NORMALIZATION = 57,
3818 
3819     /**
3820      * For input tensors x and y, computes x < y elementwise.
3821      *
3822      * Supported tensor {@link OperandCode}:
3823      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3824      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3825      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3826      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3827      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3828      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3829      *
3830      * Supported tensor rank: from 1
3831      *
3832      * This operation supports broadcasting.
3833      *
3834      * Inputs:
3835      * * 0: A tensor.
3836      * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3837      *      with input0.
3838      *
3839      * Outputs:
3840      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3841      *
3842      * Available since NNAPI feature level 3.
3843      */
3844     ANEURALNETWORKS_LESS = 58,
3845 
3846     /**
3847      * For input tensors x and y, computes x <= y elementwise.
3848      *
3849      * Supported tensor {@link OperandCode}:
3850      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3851      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3852      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3853      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3854      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3855      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3856      *
3857      * Supported tensor rank: from 1
3858      *
3859      * This operation supports broadcasting.
3860      *
3861      * Inputs:
3862      * * 0: A tensor.
3863      * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
3864      *      with input0.
3865      *
3866      * Outputs:
3867      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3868      *
3869      * Available since NNAPI feature level 3.
3870      */
3871     ANEURALNETWORKS_LESS_EQUAL = 59,
3872 
3873     /**
3874      * Computes natural logarithm of x element-wise.
3875      *
3876      * Supported tensor {@link OperandCode}:
3877      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3878      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3879      *
3880      * Supported tensor rank: from 1.
3881      *
3882      * Inputs:
3883      * * 0: A tensor.
3884      *
3885      * Outputs:
3886      * * 0: The output tensor of same shape as input0.
3887      *
3888      * Available since NNAPI feature level 3.
3889      */
3890     ANEURALNETWORKS_LOG = 60,
3891 
3892     /**
3893      * Returns the truth value of x AND y element-wise.
3894      *
3895      * Supported tensor {@link OperandCode}:
3896      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3897      *
3898      * Supported tensor rank: from 1
3899      *
3900      * This operation supports broadcasting.
3901      *
3902      * Inputs:
3903      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3904      * * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions
3905      *      compatible with input0.
3906      *
3907      * Outputs:
3908      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3909      *
3910      * Available since NNAPI feature level 3.
3911      */
3912     ANEURALNETWORKS_LOGICAL_AND = 61,
3913 
3914     /**
3915      * Computes the truth value of NOT x element-wise.
3916      *
3917      * Supported tensor {@link OperandCode}:
3918      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3919      *
3920      * Supported tensor rank: from 1.
3921      *
3922      * Inputs:
3923      * * 0: A tensor.
3924      *
3925      * Outputs:
3926      * * 0: The output tensor of same shape as input0.
3927      *
3928      * Available since NNAPI feature level 3.
3929      */
3930     ANEURALNETWORKS_LOGICAL_NOT = 62,
3931 
3932     /**
3933      * Returns the truth value of x OR y element-wise.
3934      *
3935      * Supported tensor {@link OperandCode}:
3936      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
3937      *
3938      * Supported tensor rank: from 1
3939      *
3940      * This operation supports broadcasting.
3941      *
3942      * Inputs:
3943      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3944      * * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions
3945      *      compatible with input0.
3946      *
3947      * Outputs:
3948      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
3949      *
3950      * Available since NNAPI feature level 3.
3951      */
3952     ANEURALNETWORKS_LOGICAL_OR = 63,
3953 
3954     /**
3955      * Computes the log softmax activations given logits.
3956      *
3957      * The output is calculated using this formula:
3958      *
3959      *     output = logits * beta - log(reduce_sum(exp(logits * beta), axis))
3960      *
3961      * Supported tensor {@link OperandCode}:
3962      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3963      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3964      *
3965      * Supported tensor rank: from 1.
3966      *
3967      * Inputs:
3968      * * 0: A tensor specifying the input logits.
3969      * * 1: A scalar, specifying the positive scaling factor for the exponent,
3970      *      beta.
3971      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta
3972      *      value must be of {@link ANEURALNETWORKS_FLOAT16}.
3973      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta
3974      *      value must be of {@link ANEURALNETWORKS_FLOAT32}.
3975      * * 2: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to
3976      *      reduce across. Negative index is used to specify axis from the
3977      *      end (e.g. -1 for the last axis). Must be in the range [-n, n).
3978      *
3979      * Outputs:
3980      * * 0: The output tensor of the same {@link OperandCode} and shape as
3981      *      input0.
3982      *
3983      * Available since NNAPI feature level 3.
3984      */
3985     ANEURALNETWORKS_LOG_SOFTMAX = 64,
3986 
3987     /**
3988      * Returns the element-wise maximum of two tensors.
3989      *
3990      * Supported tensor {@link OperandCode}:
3991      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
3992      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
3993      * * {@link ANEURALNETWORKS_TENSOR_INT32}
3994      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
3995      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
3996      *
3997      * Supported tensor rank: from 1.
3998      *
3999      * Inputs:
4000      * * 0: A tensor.
4001      * * 1: A tensor of the same {@link OperandCode} and compatible dimensions
4002      *      with input0.
4003      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
4004      *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
4005      *
4006      * Outputs:
4007      * * 0: A tensor of the same {@link OperandCode} as input0.
4008      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4009      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4010      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4011      *
4012      * Available since NNAPI feature level 3.
4013      */
4014     ANEURALNETWORKS_MAXIMUM = 65,
4015 
4016     /**
4017      * Returns the element-wise minimum of two tensors.
4018      *
4019      * Supported tensor {@link OperandCode}:
4020      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4021      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4022      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4023      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4024      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4025      *
4026      * Supported tensor rank: from 1.
4027      *
4028      * Inputs:
4029      * * 0: A tensor.
4030      * * 1: A tensor of the same {@link OperandCode} and compatible dimensions
4031      *      with input0.
4032      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
4033      *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
4034      *
4035      * Outputs:
4036      * * 0: A tensor of the same {@link OperandCode} as input0.
4037      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4038      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4039      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4040      *
4041      * Available since NNAPI feature level 3.
4042      */
4043     ANEURALNETWORKS_MINIMUM = 66,
4044 
4045     /**
4046      * Computes numerical negative value element-wise.
4047      *
4048      * Supported tensor {@link OperandCode}:
4049      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4050      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4051      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4052      *
4053      * Supported tensor rank: from 1.
4054      *
4055      * Inputs:
4056      * * 0: A tensor.
4057      *
4058      * Outputs:
4059      * * 0: The output tensor of same shape as input0.
4060      *
4061      * Available since NNAPI feature level 3.
4062      */
4063     ANEURALNETWORKS_NEG = 67,
4064 
4065     /**
4066      * For input tensors x and y, computes x != y elementwise.
4067      *
4068      * Supported tensor {@link OperandCode}:
4069      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
4070      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4071      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4072      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4073      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4074      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4075      *
4076      * Supported tensor rank: from 1
4077      *
4078      * This operation supports broadcasting.
4079      *
4080      * Inputs:
4081      * * 0: A tensor.
4082      * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
4083      *      with input0.
4084      *
4085      * Outputs:
4086      * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
4087      *
4088      * Available since NNAPI feature level 3.
4089      */
4090     ANEURALNETWORKS_NOT_EQUAL = 68,
4091 
4092     /**
4093      * Pads a tensor with the given constant value according to the specified
4094      * paddings.
4095      *
4096      * Supported tensor {@link OperandCode}:
4097      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4098      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4099      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4100      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4101      *
4102      * Supported tensor rank: up to 4
4103      *
4104      * Inputs:
4105      * * 0: An n-D tensor, specifying the tensor to be padded.
4106      * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
4107      *      for each spatial dimension of the input tensor. The shape of the
4108      *      tensor must be {rank(input0), 2}.
4109      *      padding[i, 0] specifies the number of elements to be padded in the
4110      *      front of dimension i.
4111      *      padding[i, 1] specifies the number of elements to be padded after
4112      *      the end of dimension i.
4113      * * 2: A scalar specifying the value to use for padding input0.
4114      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the
4115      *      pad value must be of {@link ANEURALNETWORKS_FLOAT16}.
4116      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the
4117      *      pad value must be of {@link ANEURALNETWORKS_FLOAT32}.
4118      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4119      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
4120      *      the pad value must be of {@link ANEURALNETWORKS_INT32}. The
4121      *      scale and zeroPoint are assumed to be the same as in input0.
4122      *
4123      * Outputs:
4124      * * 0: A tensor of the same {@link OperandCode} as input0. The
4125      *      output tensor has the same rank as input0, and each
4126      *      dimension of the output tensor has the same size as the
4127      *      corresponding dimension of the input tensor plus the size
4128      *      of the padding:
4129      *          output0.dimension[i] =
4130      *              padding[i, 0] + input0.dimension[i] + padding[i, 1]
4131      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4132      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4133      *      the scale and zeroPoint must be the same as input0.
4134      *
4135      * Available since NNAPI feature level 3.
4136      */
4137     ANEURALNETWORKS_PAD_V2 = 69,
4138 
4139     /**
4140      * Computes the power of one value to another.
4141      *
4142      * Given a tensor base and a tensor exponent, this operation computes
4143      * base^exponent elementwise.
4144      *
4145      * This operations supports broadcasting. The size of the output is the
4146      * maximum size along each dimension of the input operands. It starts with
4147      * the trailing dimensions, and works its way forward.
4148      *
4149      * For example:
4150      *     base.dimension     =    {4, 1, 2}
4151      *     exponent.dimension = {5, 4, 3, 1}
4152      *     output.dimension   = {5, 4, 3, 2}
4153      *
4154      * Supported tensor {@link OperandCode}:
4155      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4156      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4157      *
4158      * Supported tensor rank: from 1
4159      *
4160      * Inputs:
4161      * * 0: A tensor specifying the base.
4162      * * 1: A tensor specifying the exponent.
4163      *
4164      * Outputs:
4165      * * 0: An output tensor.
4166      *
4167      * Available since NNAPI feature level 3.
4168      */
4169     ANEURALNETWORKS_POW = 70,
4170 
4171     /**
4172      * Parametric Rectified Linear Unit.
4173      *
4174      * It follows: f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha
4175      * is a learned array with the same {@link OperandCode} and compatible
4176      * dimensions as input x.
4177      *
4178      * Two dimensions are compatible when:
4179      *     1. they are equal, or
4180      *     2. one of them is 1
4181      *
4182      * The size of the output is the maximum size along each dimension of the
4183      * input operands. It starts with the trailing dimensions, and works its way
4184      * forward.
4185      *
4186      * Example:
4187      *     input.dimension  =    {4, 1, 2}
4188      *     alpha.dimension  = {5, 4, 3, 1}
4189      *     output.dimension = {5, 4, 3, 2}
4190      *
4191      * Supported tensor {@link OperandCode}:
4192      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4193      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4194      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4195      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4196      *
4197      * Supported tensor rank: from 1
4198      *
4199      * Inputs:
4200      * * 0: A tensor, specifying the input.
4201      * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
4202      *      as input0, specifying the alpha.
4203      *
4204      * Outputs:
4205      * * 0: A tensor of the same {@link OperandCode} as input0.
4206      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4207      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4208      *      the scales and zeroPoint can be different from input0 scale and zeroPoint.
4209      *
4210      * Available since NNAPI feature level 3.
4211      */
4212     ANEURALNETWORKS_PRELU = 71,
4213 
4214     /**
4215      * Quantizes the input tensor.
4216      *
4217      * The formula for {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} output tensor is:
4218      *
4219      *     output = max(0, min(255, round(input / scale) + zeroPoint))
4220      *
4221      * The formula for {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} output
4222      * tensor is:
4223      *
4224      *     output = max(-128, min(127, round(input / scale) + zeroPoint))
4225      *
4226      * Supported input tensor {@link OperandCode}:
4227      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4228      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4229      *
4230      * Supported output tensor {@link OperandCode}:
4231      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4232      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4233      *
4234      * Supported tensor rank: from 1
4235      *
4236      * Inputs:
4237      * * 0: A tensor, may be zero-sized.
4238      *
4239      * Outputs:
4240      * * 0: The output tensor of same shape as input0, but with
4241      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} or
4242      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}.
4243      *
4244      * Available since NNAPI feature level 3.
4245      */
4246     ANEURALNETWORKS_QUANTIZE = 72,
4247 
4248     /**
4249      * A version of quantized LSTM, using 16 bit quantization for internal
4250      * state.
4251      *
4252      * There is no projection layer, so cell state size is equal to the output
4253      * size.
4254      *
4255      * Inputs:
4256      * * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4257      *      and shape [numBatches, inputSize] specifying the input to the LSTM
4258      *      cell. Tensor is quantized with a fixed quantization range of
4259      *      [-1, 127/128] (scale = 1/128, zeroPoint = 128).
4260      * * 1: The input-to-input weights.
4261      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4262      *      and shape [outputSize, inputSize] specifying input-to-input part of
4263      *      weights for fully-connected layer inside the LSTM cell.
4264      *      Quantization zero point and scale must be the same across all the
4265      *      weights.
4266      * * 2: The input-to-forget weights.
4267      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4268      *      and shape [outputSize, inputSize] specifying input-to-forget part of
4269      *      weights for fully-connected layer inside the LSTM cell.
4270      *      Quantization zero point and scale must be the same across all the
4271      *      weights.
4272      * * 3: The input-to-cell weights.
4273      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4274      *      and shape [outputSize, inputSize] specifying input-to-cell part of
4275      *      weights for fully-connected layer inside the LSTM cell.
4276      *      Quantization zero point and scale must be the same across all the
4277      *      weights.
4278      * * 4: The input-to-output weights.
4279      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4280      *      and shape [outputSize, inputSize] specifying input-to-output part of
4281      *      weights for fully-connected layer inside the LSTM cell.
4282      *      Quantization zero point and scale must be the same across all the
4283      *      weights.
4284      * * 5: The recurrent-to-input weights.
4285      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4286      *      and shape [outputSize, outputSize] specifying recurrent-to-input part
4287      *      of weights for fully-connected layer inside the LSTM cell.
4288      *      Quantization zero point and scale must be the same across all the
4289      *      weights.
4290      * * 6: The recurrent-to-forget weights.
4291      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4292      *      and shape [outputSize, outputSize] specifying recurrent-to-forget
4293      *      part of weights for fully-connected layer inside the LSTM cell.
4294      *      Quantization zero point and scale must be the same across all the
4295      *      weights.
4296      * * 7: The recurrent-to-cell weights.
4297      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4298      *      and shape [outputSize, outputSize] specifying recurrent-to-cell part
4299      *      of weights for fully-connected layer inside the LSTM cell.
4300      *      Quantization zero point and scale must be the same across all the
4301      *      weights.
4302      * * 8: The recurrent-to-output weights.
4303      *      A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4304      *      and shape [outputSize, outputSize] specifying recurrent-to-output
4305      *      part of weights for fully-connected layer inside the LSTM cell.
4306      *      Quantization zero point and scale must be the same across all the
4307      *      weights.
4308      * * 9: The input gate bias.
4309      *      A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4310      *      [outputSize] specifying the bias for the fully-connected layer
4311      *      inside the LSTM cell. Bias is quantized with scale being a product
4312      *      of input and weights scales and zeroPoint equal to 0.
4313      * * 10: The forget gate bias.
4314      *      A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4315      *      [outputSize] specifying the bias for the fully-connected layer
4316      *      inside the LSTM cell. Bias is quantized with scale being a product
4317      *      of input and weights scales and zeroPoint equal to 0.
4318      * * 11: The cell bias.
4319      *      A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4320      *      [outputSize] specifying the bias for the fully-connected layer
4321      *      inside the LSTM cell. Bias is quantized with scale being a product
4322      *      of input and weights scales and zeroPoint equal to 0.
4323      * * 12: The output gate bias.
4324      *      A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
4325      *      [outputSize] specifying the bias for the fully-connected layer
4326      *      inside the LSTM cell. Bias is quantized with scale being a product
4327      *      of input and weights scales and zeroPoint equal to 0.
4328      * * 13: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
4329      *       and shape [numBatches, outputSize] specifying the cell state from the
4330      *       previous time step of the LSTM cell. It is quantized using a
4331      *       quantization range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 /
4332      *       32768, zeroPoint = 0).
4333      * * 14: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4334      *       and shape [numBatches, outputSize] specifying the output of the LSTM
4335      *       cell from previous time-step. Tensor is quantized with a fixed
4336      *       quantization range of [-1, 127/128] (scale = 1/128, zeroPoint =
4337      *       128).
4338      *
4339      *
4340      * Outputs:
4341      * * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
4342      *      and shape [numBatches, outputSize] which contains a cell state from
4343      *      the current time step. Tensor is quantized using a quantization
4344      *      range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / 32768, zeroPoint =
4345      *      0).
4346      * * 1: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4347      *      and shape [numBatches, outputSize] which contains the output value.
4348      *      Tensor is quantized with a fixed quantization range of [-1, 127/128]
4349      *      (scale = 1/128, zeroPoint = 128).
4350      */
4351     ANEURALNETWORKS_QUANTIZED_16BIT_LSTM = 73,
4352 
4353     /**
4354      * Draws samples from a multinomial distribution.
4355      *
4356      * Supported tensor {@link OperandCode}:
4357      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4358      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4359      *
4360      * Inputs:
4361      * * 0: A 2-D tensor with shape [batches, classes], specifying the
4362      *      unnormalized log-probabilities for all classes.
4363      * * 1: A scalar {@link ANEURALNETWORKS_INT32}, specifying the number of
4364      *      independent samples to draw for each row slice.
4365      * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape [2],
4366      *      specifying seeds used to initialize the random distribution. If both
4367      *      provided seeds are 0, both will be randomly generated.
4368      * Outputs:
4369      * * 0: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape
4370      *      [batches, samples], containing the drawn samples.
4371      *
4372      * Available since NNAPI feature level 3.
4373      */
4374     ANEURALNETWORKS_RANDOM_MULTINOMIAL = 74,
4375 
4376     /**
4377      * Reduces a tensor by computing the "logical and" of elements along given
4378      * dimensions.
4379      *
4380      * If keep_dims is true, the reduced dimensions are
4381      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4382      * 1 for each entry in dimensions.
4383      *
4384      * Supported tensor {@link OperandCode}:
4385      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
4386      *
4387      * Supported tensor rank: up to 4
4388      *
4389      * Inputs:
4390      * * 0: An n-D tensor.
4391      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4392      *      to reduce. Dimension values must be in the range [-n, n).
4393      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4394      *      retains reduced dimensions with length 1.
4395      *
4396      * Outputs:
4397      * * 0: A tensor of the same {@link OperandCode} as input0.
4398      *      If all dimensions are reduced and keep_dims is false, the output
4399      *      shape is [1].
4400      *
4401      * Available since NNAPI feature level 3.
4402      */
4403     ANEURALNETWORKS_REDUCE_ALL = 75,
4404 
4405     /**
4406      * Reduces a tensor by computing the "logical or" of elements along given
4407      * dimensions.
4408      *
4409      * If keep_dims is true, the reduced dimensions are
4410      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4411      * 1 for each entry in dimensions.
4412      *
4413      * Supported tensor {@link OperandCode}:
4414      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
4415      *
4416      * Supported tensor rank: up to 4
4417      *
4418      * Inputs:
4419      * * 0: An n-D tensor.
4420      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4421      *      to reduce. Dimension values must be in the range [-n, n).
4422      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4423      *      retains reduced dimensions with length 1.
4424      *
4425      * Outputs:
4426      * * 0: A tensor of the same {@link OperandCode} as input0.
4427      *      If all dimensions are reduced and keep_dims is false, the output
4428      *      shape is [1].
4429      *
4430      * Available since NNAPI feature level 3.
4431      */
4432     ANEURALNETWORKS_REDUCE_ANY = 76,
4433 
4434     /**
4435      * Reduces a tensor by computing the maximum of elements along given
4436      * dimensions.
4437      *
4438      * If keep_dims is true, the reduced dimensions are
4439      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4440      * 1 for each entry in dimensions.
4441      *
4442      * Supported tensor {@link OperandCode}:
4443      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4444      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4445      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4446      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4447      *
4448      * Supported tensor rank: up to 4
4449      *
4450      * Inputs:
4451      * * 0: An n-D tensor.
4452      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4453      *      to reduce. Dimension values must be in the range [-n, n).
4454      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4455      *      retains reduced dimensions with length 1.
4456      *
4457      * Outputs:
4458      * * 0: A tensor of the same {@link OperandCode} as input0.
4459      *      If all dimensions are reduced and keep_dims is false, the output
4460      *      shape is [1].
4461      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4462      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4463      *      the scale and zeroPoint must be the same as input0.
4464      *
4465      * Available since NNAPI feature level 3.
4466      */
4467     ANEURALNETWORKS_REDUCE_MAX = 77,
4468 
4469     /**
4470      * Reduces a tensor by computing the minimum of elements along given
4471      * dimensions.
4472      *
4473      * If keep_dims is true, the reduced dimensions are
4474      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4475      * 1 for each entry in dimensions.
4476      *
4477      * Supported tensor {@link OperandCode}:
4478      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4479      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4480      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4481      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4482      *
4483      * Supported tensor rank: up to 4
4484      *
4485      * Inputs:
4486      * * 0: An n-D tensor.
4487      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4488      *      to reduce. Dimension values must be in the range [-n, n).
4489      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4490      *      retains reduced dimensions with length 1.
4491      *
4492      * Outputs:
4493      * * 0: A tensor of the same {@link OperandCode} as input0.
4494      *      If all dimensions are reduced and keep_dims is false, the output
4495      *      shape is [1].
4496      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4497      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4498      *      the scale and zeroPoint must be the same as input0.
4499      *
4500      * Available since NNAPI feature level 3.
4501      */
4502     ANEURALNETWORKS_REDUCE_MIN = 78,
4503 
4504     /**
4505      * Reduces a tensor by multiplying elements along given dimensions.
4506      *
4507      * If keep_dims is true, the reduced dimensions are
4508      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4509      * 1 for each entry in dimensions.
4510      *
4511      * Supported tensor {@link OperandCode}:
4512      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4513      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4514      *
4515      * Supported tensor rank: up to 4
4516      *
4517      * Inputs:
4518      * * 0: An n-D tensor.
4519      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4520      *      to reduce. Dimension values must be in the range [-n, n).
4521      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4522      *      retains reduced dimensions with length 1.
4523      *
4524      * Outputs:
4525      * * 0: A tensor of the same {@link OperandCode} as input0.
4526      *      If all dimensions are reduced and keep_dims is false, the output
4527      *      shape is [1].
4528      *
4529      * Available since NNAPI feature level 3.
4530      */
4531     ANEURALNETWORKS_REDUCE_PROD = 79,
4532 
4533     /**
4534      * Reduces a tensor by summing elements along given dimensions.
4535      *
4536      * If keep_dims is true, the reduced dimensions are
4537      * retained with length 1. Otherwise, the rank of the tensor is reduced by
4538      * 1 for each entry in dimensions.
4539      *
4540      * Supported tensor {@link OperandCode}:
4541      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4542      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4543      *
4544      * Supported tensor rank: up to 4
4545      *
4546      * Inputs:
4547      * * 0: An n-D tensor.
4548      * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
4549      *      to reduce. Dimension values must be in the range [-n, n).
4550      * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
4551      *      retains reduced dimensions with length 1.
4552      *
4553      * Outputs:
4554      * * 0: A tensor of the same {@link OperandCode} as input0.
4555      *      If all dimensions are reduced and keep_dims is false, the output
4556      *      shape is [1].
4557      *
4558      * Available since NNAPI feature level 3.
4559      */
4560     ANEURALNETWORKS_REDUCE_SUM = 80,
4561 
4562     /**
4563      * Select and scale the feature map of each region of interest to a unified
4564      * output size by average pooling sampling points from bilinear interpolation.
4565      *
4566      * The region of interest is represented by its upper-left corner coordinate
4567      * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
4568      * A spatial scaling factor is applied to map into feature map coordinate.
4569      * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
4570      *
4571      * No rounding is applied in this operation. The sampling points are uniformly
4572      * distributed in the pooling bin and their values are calculated by bilinear
4573      * interpolation.
4574      *
4575      * Supported tensor {@link OperandCode}:
4576      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4577      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4578      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4579      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4580      *
4581      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4582      * With the default data layout NHWC, the data is stored in the order of:
4583      * [batch, height, width, channels]. Alternatively, the data layout could
4584      * be NCHW, the data storage order of: [batch, channels, height, width].
4585      *
4586      * Inputs:
4587      * * 0: A 4-D tensor, specifying the feature map.
4588      * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
4589      *      the regions of interest, each line with format [x1, y1, x2, y2].
4590      *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4591      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}, this tensor
4592      *      should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with
4593      *      zeroPoint of 0 and scale of 0.125. Zero num_rois is supported for this tensor.
4594      * * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
4595      *      [num_rois], specifying the batch index of each box. Boxes with
4596      *      the same batch index are grouped together. Zero num_rois is
4597      *      supported for this tensor.
4598      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4599      *      height of the output tensor.
4600      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4601      *      width of the output tensor.
4602      * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4603      *      from the height of original image to the height of feature map.
4604      * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4605      *      from the width of original image to the width of feature map.
4606      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
4607      *      sampling points in height dimension used to compute the output.
4608      *      Set to 0 for adaptive value of ceil(roi_height/out_height).
4609      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
4610      *      sampling points in width dimension used to compute the output.
4611      *      Set to 0 for adaptive value of ceil(roi_width/out_width).
4612      * * 9: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
4613      *      NCHW data layout for input0 and output0. Set to false for NHWC.
4614      *
4615      * Outputs:
4616      * * 0: A tensor of the same {@link OperandCode} as input0. The output
4617      *      shape is [num_rois, out_height, out_width, depth].
4618      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4619      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4620      *      the scale and zeroPoint can be different from the input0 scale and zeroPoint.
4621      *
4622      * Available since NNAPI feature level 3.
4623      */
4624     ANEURALNETWORKS_ROI_ALIGN = 81,
4625 
4626     /**
4627      * Select and scale the feature map of each region of interest to a unified
4628      * output size by max-pooling.
4629      *
4630      * The region of interest is represented by its upper-left corner coordinate
4631      * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
4632      * A spatial scaling factor is applied to map into feature map coordinate.
4633      * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
4634      *
4635      * Rounding is applied in this operation to ensure integer boundary for
4636      * regions of interest and pooling bins.
4637      *
4638      * Supported tensor {@link OperandCode}:
4639      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4640      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4641      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4642      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4643      *
4644      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4645      * With the default data layout NHWC, the data is stored in the order of:
4646      * [batch, height, width, channels]. Alternatively, the data layout could
4647      * be NCHW, the data storage order of: [batch, channels, height, width].
4648      *
4649      * Inputs:
4650      * * 0: A 4-D tensor, specifying the feature map.
4651      * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
4652      *      the regions of interest, each line with format [x1, y1, x2, y2].
4653      *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4654      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4655      *      this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
4656      *      with zeroPoint of 0 and scale of 0.125.
4657      * * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
4658      *      [num_rois], specifying the batch index of each box. Boxes with
4659      *      the same batch index are grouped together.
4660      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4661      *      height of the output tensor.
4662      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
4663      *      width of the output tensor.
4664      * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4665      *      from the height of original image to the height of feature map.
4666      * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
4667      *      from the width of original image to the width of feature map.
4668      * * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
4669      *      NCHW data layout for input0 and output0. Set to false for NHWC.
4670      *
4671      * Outputs:
4672      * * 0: A tensor of the same {@link OperandCode} as input0. The output
4673      *      shape is [num_rois, out_height, out_width, depth].
4674      *      For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4675      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4676      *      the scale and zeroPoint must be the same as input0.
4677      *
4678      * Available since NNAPI feature level 3.
4679      */
4680     ANEURALNETWORKS_ROI_POOLING = 82,
4681 
4682     /**
4683      * Computes reciprocal of square root of x element-wise.
4684      *
4685      * Supported tensor {@link OperandCode}:
4686      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4687      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4688      *
4689      * Supported tensor rank: from 1.
4690      *
4691      * Inputs:
4692      * * 0: A tensor.
4693      *
4694      * Outputs:
4695      * * 0: The output tensor of same shape as input0.
4696      *
4697      * Available since NNAPI feature level 3.
4698      */
4699     ANEURALNETWORKS_RSQRT = 83,
4700 
4701     /**
4702      * Using a tensor of booleans c and input tensors x and y select values
4703      * elementwise from both input tensors:
4704      *
4705      * output[i] = c[i] ? x[i] : y[i].
4706      *
4707      * Supported tensor {@link OperandCode}:
4708      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4709      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4710      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4711      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4712      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4713      *
4714      * Supported tensor rank: from 1
4715      *
4716      * Inputs:
4717      * * 0: A tensor of type {@link ANEURALNETWORKS_TENSOR_BOOL8} acting as a
4718      *      mask that chooses, based on the value at each element, whether the
4719      *      corresponding element in the output should be taken from input1 (if
4720      *      true) or input2 (if false).
4721      * * 1: An input tensor of the same shape as input0.
4722      * * 2: An input tensor of the same shape and type as input1.
4723      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4724      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4725      *      the scales and zeroPoint can be different from input1 scale and zeroPoint.
4726      *
4727      * Outputs:
4728      * * 0: A tensor of the same type and shape as input1 and input2.
4729      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
4730      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
4731      *
4732      * Available since NNAPI feature level 3.
4733      */
4734     ANEURALNETWORKS_SELECT = 84,
4735 
4736     /**
4737      * Computes sin of x element-wise.
4738      *
4739      * Supported tensor {@link OperandCode}:
4740      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4741      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4742      *
4743      * Supported tensor rank: from 1.
4744      *
4745      * Inputs:
4746      * * 0: A tensor.
4747      *
4748      * Outputs:
4749      * * 0: The output tensor of same shape as input0.
4750      *
4751      * Available since NNAPI feature level 3.
4752      */
4753     ANEURALNETWORKS_SIN = 85,
4754 
4755     /**
4756      * Extracts a slice of specified size from the input tensor starting at a
4757      * specified location.
4758      *
4759      * The starting location is specified as a 1-D tensor containing offsets
4760      * for each dimension. The size is specified as a 1-D tensor containing
4761      * either size of a slice along corresponding dimension or -1. In the latter
4762      * case, all the remaining elements in dimension are included in the slice.
4763      *
4764      * A sum of begin offset and a size of a slice must not exceed size of a
4765      * corresponding dimension.
4766      *
4767      * Supported tensor {@link OperandCode}:
4768      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4769      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4770      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4771      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4772      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4773      *
4774      * Supported tensor rank: from 1
4775      *
4776      * Inputs:
4777      * * 0: An n-D tensor to take slice from, may be zero-sized.
4778      * * 1: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying
4779      *      the beginning indices of the slice in each dimension.
4780      * * 2: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying
4781      *      the size of the slice in each dimension.
4782      *
4783      * Outputs:
4784      * * 0: An n-D tensor of the same type as the input containing the slice.
4785      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4786      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4787      *      its scale and zeroPoint has to be same as the input0 scale and zeroPoint.
4788      *
4789      * Available since NNAPI feature level 3.
4790      */
4791     ANEURALNETWORKS_SLICE = 86,
4792 
4793     /**
4794      * Splits a tensor along a given axis into num_splits subtensors.
4795      *
4796      * Supported tensor {@link OperandCode}:
4797      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4798      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4799      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4800      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4801      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4802      *
4803      * Supported tensor rank: from 1
4804      *
4805      * Inputs:
4806      * * 0: An n-D tensor to split.
4807      * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis along
4808      *      which to split.
4809      * * 2: An {@link ANEURALNETWORKS_INT32} scalar indicating the number of
4810      *      splits along given axis. Must evenly divide axis size.
4811      *
4812      * Outputs:
4813      * * 0 ~ (num_splits - 1): Resulting subtensors.
4814      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4815      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4816      *      the scale and zeroPoint must be the same as input0.
4817      *
4818      * Available since NNAPI feature level 3.
4819      */
4820     ANEURALNETWORKS_SPLIT = 87,
4821 
4822     /**
4823      * Computes square root of x element-wise.
4824      *
4825      * Supported tensor {@link OperandCode}:
4826      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4827      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4828      *
4829      * Supported tensor rank: from 1.
4830      *
4831      * Inputs:
4832      * * 0: A tensor.
4833      *
4834      * Outputs:
4835      * * 0: The output tensor of same shape as input0.
4836      *
4837      * Available since NNAPI feature level 3.
4838      */
4839     ANEURALNETWORKS_SQRT = 88,
4840 
4841     /**
4842      * Constructs a tensor by tiling a given tensor.
4843      *
4844      * This operation creates a new tensor by replicating `input` `multiples`
4845      * times. The output tensor's i-th dimension has `input.dims(i) * multiples[i]`
4846      * elements, and the values of `input` are replicated `multiples[i]` times
4847      * along the i-th dimension.
4848      * For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`.
4849      *
4850      * Supported tensor {@link OperandCode}:
4851      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4852      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4853      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4854      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4855      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4856      *
4857      * Supported tensor rank: from 1
4858      *
4859      * Inputs:
4860      * * 0: input, an n-D tensor specifying the input.
4861      * * 1: multiples, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}.
4862      *      The length of multiples must be n.
4863      *
4864      * Outputs:
4865      * * 0: A tiled tensor of the same {@link OperandCode} and rank as `input`.
4866      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4867      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4868      *      the scale and zeroPoint must be the same as input0.
4869      *
4870      * Available since NNAPI feature level 3.
4871      */
4872     ANEURALNETWORKS_TILE = 89,
4873 
4874     /**
4875      * Finds values and indices of the k largest entries for the last dimension.
4876      *
4877      * Resulting values in each dimension are sorted in descending order. If
4878      * two values are equal, the one with larger index appears first.
4879      *
4880      * Supported tensor {@link OperandCode}:
4881      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
4882      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
4883      * * {@link ANEURALNETWORKS_TENSOR_INT32}
4884      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4885      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
4886      *
4887      * Supported tensor rank: from 1
4888      *
4889      * Inputs:
4890      * * 0: input, an n-D tensor specifying the input.
4891      * * 1: k, an {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
4892      *      top elements to look for along the last dimension.
4893      *
4894      * Outputs:
4895      * * 0: An n-D tensor of the same type as the input, containing the k
4896      *      largest elements along each last dimensional slice.
4897      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
4898      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
4899      *      the scale and zeroPoint must be the same as input0.
4900      * * 1: An n-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32}
4901      *      containing the indices of values within the last dimension of input.
4902      *
4903      * Available since NNAPI feature level 3.
4904      */
4905     ANEURALNETWORKS_TOPK_V2 = 90,
4906 
4907     /**
4908      * Performs the transpose of 2-D convolution operation.
4909      *
4910      * This operation is sometimes called "deconvolution" after Deconvolutional
4911      * Networks, but is actually the transpose (gradient) of
4912      * {@link ANEURALNETWORKS_CONV_2D} rather than an actual deconvolution.
4913      *
4914      * The output dimensions are functions of the filter dimensions, stride, and
4915      * padding.
4916      *
4917      * Supported tensor {@link OperandCode} configurations:
4918      * * 16 bit floating point:
4919      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
4920      *
4921      * * 32 bit floating point:
4922      * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
4923      *
4924      * * Quantized:
4925      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
4926      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
4927      * * * input.scale * filter.scale).
4928      *
4929      * * Quantized with symmetric per channel quantization for the filter:
4930      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
4931      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
4932      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
4933      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
4934      *
4935      * Available since NNAPI feature level 4:
4936      * * Quantized signed (since NNAPI feature level 4):
4937      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, filter, and output.
4938      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
4939      * * * input.scale * filter.scale).
4940      *
4941      * * Quantized signed with filter symmetric per channel quantization
4942      *   (since NNAPI feature level 4):
4943      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} for input, and output.
4944      * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
4945      * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
4946      * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
4947      *
4948      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
4949      * With the default data layout NHWC, the data is stored in the order of:
4950      * [batch, height, width, channels]. Alternatively, the data layout could
4951      * be NCHW, the data storage order of: [batch, channels, height, width].
4952      *
4953      * Both explicit padding and implicit padding are supported.
4954      *
4955      * Inputs (explicit padding):
4956      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
4957      *      specifying the input.
4958      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
4959      * * 1: A 4-D tensor, of shape
4960      *      [depth_out, filter_height, filter_width, depth_in], specifying the
4961      *      filter. For tensor of type
4962      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
4963      *      dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0.
4964      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
4965      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
4966      *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the
4967      *      same type.
4968      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
4969      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
4970      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32},
4971      *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
4972      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
4973      *      the bias must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
4974      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
4975      *      bias_scale[i] = input_scale * filter_scale[i].
4976      * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
4977      *      the left, in the ‘width’ dimension.
4978      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
4979      *      the right, in the ‘width’ dimension.
4980      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
4981      *      the top, in the ‘height’ dimension.
4982      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
4983      *      the bottom, in the ‘height’ dimension.
4984      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
4985      *      walking through input in the ‘width’ dimension.
4986      * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
4987      *      walking through input in the ‘height’ dimension.
4988      * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
4989      *      {@link FuseCode} values. Specifies the activation to
4990      *      invoke on the result.
4991      * * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
4992      *       NCHW data layout for input0 and output0. Set to false for NHWC.
4993      *
4994      * Inputs (implicit padding):
4995      * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
4996      *      specifying the input.
4997      *      Since NNAPI feature level 3, zero batches is supported for this tensor.
4998      * * 1: A 4-D tensor, of shape
4999      *      [depth_out, filter_height, filter_width, depth_in], specifying the
5000      *      filter. For tensor of type
5001      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
5002      *      dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0.
5003      * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
5004      *      tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
5005      *      {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias should be of the
5006      *      same type.
5007      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5008      *      and {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED},
5009      *      the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32},
5010      *      with zeroPoint of 0 and bias_scale == input_scale * filter_scale.
5011      *      For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL},
5012      *      the bias must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0
5013      *      and bias_scale of 0. The actual scale of each value 'i' is equal to
5014      *      bias_scale[i] = input_scale * filter_scale[i].
5015      * * 3: An {@link ANEURALNETWORKS_TENSOR_INT32} tensor, specifying the output
5016      *      tensor shape.
5017      * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
5018      *      padding scheme, has to be one of the
5019      *      {@link PaddingCode} values.
5020      * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
5021      *      walking through input in the ‘width’ dimension.
5022      * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
5023      *      walking through input in the ‘height’ dimension.
5024      * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
5025      *      {@link FuseCode} values. Specifies the activation to
5026      *      invoke on the result.
5027      * * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
5028      *      NCHW data layout for input0 and output0. Set to false for NHWC.
5029      *
5030      * Outputs:
5031      * * 0: The output 4-D tensor, of shape
5032      *      [batches, out_height, out_width, depth_out].
5033      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
5034      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5035      *      the scale and zeroPoint can be different from inputs' scale and zeroPoint.
5036      *
5037      * Available since NNAPI feature level 3.
5038      */
5039     ANEURALNETWORKS_TRANSPOSE_CONV_2D = 91,
5040 
5041     /**
5042      * A recurrent neural network specified by an LSTM cell.
5043      *
5044      * Performs (fully) dynamic unrolling of input.
5045      *
5046      * This Op unrolls the input along the time dimension, and implements the
5047      * following operation for each element in the sequence
5048      * s = 1...sequence_length:
5049      *   outputs[s] = projection(state = activation(LSTMOp(inputs[s])))
5050      *
5051      * Where LSTMOp is the LSTM op as in {@link ANEURALNETWORKS_LSTM},
5052      * the "projection" is an optional projection layer from state and output
5053      * and the “activation” is the function passed as the
5054      * “fused_activation_function” argument (if not “NONE”).
5055      *
5056      * Supported tensor {@link OperandCode}:
5057      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5058      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5059      *
5060      * Supported tensor rank: 3, either time-major or batch-major.
5061      *
5062      * All input and output tensors must be of the same type.
5063      *
5064      * Inputs:
5065      * * 0: The input (\f$x_t\f$).
5066      *      A 3-D tensor of shape:
5067      *        If time-major: [max_time, batch_size, input_size]
5068      *        If batch-major: [batch_size, max_time, input_size]
5069      *      where “max_time” is the number of timesteps (sequence length),
5070      *      “batch_size” corresponds to the batching dimension, and
5071      *      “input_size” is the size of the input.
5072      * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
5073      *      A 2-D tensor of shape [num_units, input_size], where “num_units”
5074      *      corresponds to the number of cell units.
5075      * * 2: The input-to-forget weights (\f$W_{xf}\f$).
5076      *      A 2-D tensor of shape [num_units, input_size].
5077      * * 3: The input-to-cell weights (\f$W_{xc}\f$).
5078      *      A 2-D tensor of shape [num_units, input_size].
5079      * * 4: The input-to-output weights (\f$W_{xo}\f$).
5080      *      A 2-D tensor of shape [num_units, input_size].
5081      * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
5082      *      A 2-D tensor of shape [num_units, output_size], where “output_size”
5083      *      corresponds to either the number of cell units (i.e., “num_units”),
5084      *      or the second dimension of the “projection_weights”, if defined.
5085      * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
5086      *      A 2-D tensor of shape [num_units, output_size].
5087      * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
5088      *      A 2-D tensor of shape [num_units, output_size].
5089      * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
5090      *      A 2-D tensor of shape [num_units, output_size].
5091      * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
5092      *      A 1-D tensor of shape [num_units].
5093      * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
5094      *      A 1-D tensor of shape [num_units].
5095      * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
5096      *      A 1-D tensor of shape [num_units].
5097      * * 12:The input gate bias (\f$b_i\f$). Optional.
5098      *      A 1-D tensor of shape [num_units].
5099      * * 13:The forget gate bias (\f$b_f\f$).
5100      *      A 1-D tensor of shape [num_units].
5101      * * 14:The cell bias (\f$b_c\f$).
5102      *      A 1-D tensor of shape [num_units].
5103      * * 15:The output gate bias (\f$b_o\f$).
5104      *      A 1-D tensor of shape [num_units].
5105      * * 16:The projection weights (\f$W_{proj}\f$). Optional.
5106      *      A 2-D tensor of shape [output_size, num_units].
5107      * * 17:The projection bias (\f$b_{proj}\f$). Optional.
5108      *      A 1-D tensor of shape [output_size].
5109      * * 18:The output state (in) (\f$h_{t-1}\f$).
5110      *      A 2-D tensor of shape [batch_size, output_size].
5111      * * 19:The cell state (in) (\f$C_{t-1}\f$).
5112      *      A 2-D tensor of shape [batch_size, num_units].
5113      * * 20:The activation function (\f$g\f$).
5114      *      A value indicating the activation function:
5115      *      <ul>
5116      *      <li>0: None;
5117      *      <li>1: Relu;
5118      *      <li>3: Relu6;
5119      *      <li>4: Tanh;
5120      *      <li>6: Sigmoid.
5121      *      </ul>
5122      * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
5123      *      that values are bound within [-cell_clip, cell_clip]. If set to 0.0
5124      *      then clipping is disabled.
5125      * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
5126      *      projection layer, such that values are bound within
5127      *      [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
5128      * * 23:Time-major if true, batch-major if false.
5129      * * 24:The input layer normalization weights. Optional.
5130      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5131      *      to activation at input gate.
5132      * * 25:The forget layer normalization weights. Optional.
5133      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5134      *      to activation at forget gate.
5135      * * 26:The cell layer normalization weights. Optional.
5136      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5137      *      to activation at cell gate.
5138      * * 27:The output layer normalization weights. Optional.
5139      *      A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
5140      *      to activation at output gate.
5141      *
5142      * Outputs:
5143      * * 0: The output (\f$o_t\f$).
5144      *      A 3-D tensor of shape:
5145      *        If time-major: [max_time, batch_size, output_size]
5146      *        If batch-major: [batch_size, max_time, output_size]
5147      * * 1: A tensor of shape [batch_size, output_size] containing a hidden
5148      *      state from the last time step in the sequence. This output is
5149      *      optional and can be omitted. If this output is present then
5150      *      output #2 must be present as well.
5151      *      Available since NNAPI feature level 4.
5152      * * 2: A tensor of shape [batch_size, cell_size] containing a cell state
5153      *      from the last time step in the sequence. This output is optional
5154      *      and can be omitted.
5155      *      Available since NNAPI feature level 4.
5156      *
5157      * Available since NNAPI feature level 3.
5158      *
5159      * Important: As of NNAPI feature level 3, there is no way to get the output state tensors out
5160      * and NNAPI does not maintain internal states. This operator does not support the usage pattern
5161      * in which multiple cells are chained and state tensors are propagated.
5162      */
5163     ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM = 92,
5164 
5165     /**
5166      * A recurrent neural network layer that applies a basic RNN cell to a
5167      * sequence of inputs.
5168      *
5169      * This layer unrolls the input along the sequence dimension, and implements
5170      * the following operation
5171      * for each element in the sequence s = 1...sequence_length:
5172      *   outputs[s] = state = activation(inputs[s] * input_weights’ + state *
5173      *   recurrent_weights’ + bias)
5174      *
5175      * Where:
5176      * * “input_weights” is a weight matrix that multiplies the inputs;
5177      * * “recurrent_weights” is a weight matrix that multiplies the current
5178      *    “state” which itself is the output from the previous time step
5179      *    computation;
5180      * * “bias” is a bias vector (added to each output vector in the batch);
5181      * * “activation” is the function passed as the “fused_activation_function”
5182      *   argument (if not “NONE”).
5183      *
5184      * Supported tensor {@link OperandCode}:
5185      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5186      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5187      *
5188      * The input tensors must all be the same type.
5189      *
5190      * Inputs:
5191      * * 0: input.
5192      *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
5193      *      it is set to 1, then the input has a shape [maxTime, batchSize,
5194      *      inputSize], otherwise the input has a shape [batchSize, maxTime,
5195      *      inputSize].
5196      * * 1: weights.
5197      *      A 2-D tensor of shape [numUnits, inputSize].
5198      * * 2: recurrent_weights.
5199      *      A 2-D tensor of shape [numUnits, numUnits].
5200      * * 3: bias.
5201      *      A 1-D tensor of shape [numUnits].
5202      * * 4: hidden state
5203      *      A 2-D tensor of shape [batchSize, numUnits]. Specifies a hidden
5204      *      state input for the first time step of the computation.
5205      * * 5: fusedActivationFunction.
5206      *      A {@link FuseCode} value indicating the activation function. If
5207      *      “NONE” is specified then it results in a linear activation.
5208      * * 6: timeMajor
5209      *      An {@link ANEURALNETWORKS_INT32} scalar specifying the shape format
5210      *      of input and output tensors. Must be set to either 0 or 1.
5211      * Outputs:
5212      * * 0: output.
5213      *      A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
5214      *      it is set to 1, then the output has a shape [maxTime, batchSize,
5215      *      numUnits], otherwise the output has a shape [batchSize, maxTime,
5216      *      numUnits].
5217      * * 1: A tensor of shape [batchSize, numUnits] containing hidden state
5218      *      from the last time step in the sequence. This output is optional
5219      *      and can be omitted.
5220      *      Available since NNAPI feature level 4.
5221      *
5222      * Available since NNAPI feature level 3.
5223      *
5224      * Important: As of NNAPI feature level 3, there is no way to get the output state tensors out
5225      * and NNAPI does not maintain internal states. This operator does not support the usage pattern
5226      * in which multiple cells are chained and state tensors are propagated.
5227      */
5228     ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN = 93,
5229 
5230     /**
5231      * Resizes images to given size using the nearest neighbor interpolation.
5232      *
5233      * Resized images will be distorted if their output aspect ratio is not the
5234      * same as input aspect ratio. The corner pixels of output may not be the
5235      * same as corner pixels of input.
5236      *
5237      * Supported tensor {@link OperandCode}:
5238      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5239      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5240      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5241      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} (since NNAPI feature level 4)
5242      *
5243      * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
5244      * With the default data layout NHWC, the data is stored in the order of:
5245      * [batch, height, width, channels]. Alternatively, the data layout could
5246      * be NCHW, the data storage order of: [batch, channels, height, width].
5247      *
5248      * Both resizing by shape and resizing by scale are supported.
5249      *
5250      * Inputs (resizing by shape):
5251      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
5252      *      the input. Zero batches is supported for this tensor.
5253      * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
5254      *      width of the output tensor.
5255      * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
5256      *      height of the output tensor.
5257      * * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false.
5258      *      Set to true to specify NCHW data layout for input0 and output0.
5259      * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}
5260      *      scalar, default to false.  If True, the centers of the 4 corner
5261      *      pixels of the input and output tensors are aligned, preserving the
5262      *      values at the corner pixels.
5263      *      Available since NNAPI feature level 4.
5264      * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}
5265      *      scalar, default to false. If True, the pixel centers are assumed to
5266      *      be at (0.5, 0.5). This is the default behavior of image.resize in
5267      *      TF 2.0. If this parameter is True, then align_corners parameter
5268      *      must be False.
5269      *      Available since NNAPI feature level 4.
5270      *
5271      * Inputs (resizing by scale):
5272      * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
5273      *      the input. Zero batches is supported for this tensor.
5274      * * 1: A scalar, specifying width_scale, the scaling factor of the width
5275      *      dimension from the input tensor to the output tensor. The output
5276      *      width is calculated as new_width = floor(width * width_scale).
5277      *      The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
5278      *      of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
5279      *      {@link ANEURALNETWORKS_FLOAT32} otherwise.
5280      * * 2: A scalar, specifying height_scale, the scaling factor of the height
5281      *      dimension from the input tensor to the output tensor. The output
5282      *      height is calculated as new_height = floor(height * height_scale).
5283      *      The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
5284      *      of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
5285      *      {@link ANEURALNETWORKS_FLOAT32} otherwise.
5286      * * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false.
5287      *      Set to true to specify NCHW data layout for input0 and output0.
5288      * * 4: Align corners. An optional {@link ANEURALNETWORKS_BOOL}
5289      *      scalar, default to false.  If True, the centers of the 4 corner
5290      *      pixels of the input and output tensors are aligned, preserving the
5291      *      values at the corner pixels.
5292      *      Available since NNAPI feature level 4.
5293      * * 5: Half pixel centers. An optional {@link ANEURALNETWORKS_BOOL}
5294      *      scalar, default to false. If True, the pixel centers are assumed to
5295      *      be at (0.5, 0.5). This is the default behavior of image.resize in
5296      *      TF 2.0. If this parameter is True, then align_corners parameter
5297      *      must be False.
5298      *      Available since NNAPI feature level 4.
5299      *
5300      * Outputs:
5301      * * 0: The output 4-D tensor, of shape
5302      *      [batches, new_height, new_width, depth].
5303      *      For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} and
5304      *      {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED} tensor,
5305      *      the scale and zeroPoint must be the same as input0.
5306      *
5307      * Available since NNAPI feature level 3.
5308      */
5309     ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR = 94,
5310 
5311     // Operations below are available since NNAPI feature level 4.
5312 
5313     /**
5314      * Quantized version of {@link ANEURALNETWORKS_LSTM}.
5315      *
5316      * The input and the output use asymmetric quantized types, while the rest
5317      * use symmetric ones.
5318      *
5319      * Inputs:
5320      * * 0: The input to the LSTM cell.
5321      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5322      *      Shape: [batchSize, inputSize]
5323      * * 1: The input-to-input weights. Optional.
5324      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5325      *      Shape: [numUnits, inputSize]
5326      * * 2: The input-to-forget weights.
5327      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5328      *      Shape: [numUnits, inputSize]
5329      * * 3: The input-to-cell weights.
5330      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5331      *      Shape: [numUnits, inputSize]
5332      * * 4: The input-to-output weights.
5333      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5334      *      Shape: [numUnits, inputSize]
5335      * * 5: The recurrent-to-input weights. Optional.
5336      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5337      *      Shape: [numUnits, outputSize]
5338      * * 6: The recurrent-to-forget weights.
5339      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5340      *      Shape: [numUnits, outputSize]
5341      * * 7: The recurrent-to-cell weights.
5342      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5343      *      Shape: [numUnits, outputSize]
5344      * * 8: The recurrent-to-output weights.
5345      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5346      *      Shape: [numUnits, outputSize]
5347      * * 9: The cell-to-input weights (for peephole). Optional.
5348      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5349      *      Shape: [numUnits]
5350      * * 10: The cell-to-forget weights (for peephole). Optional.
5351      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5352      *       Shape: [numUnits]
5353      * * 11: The cell-to-output weights (for peephole). Optional.
5354      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5355      *       Shape: [numUnits]
5356      * * 12: The input gate bias. Quantized with scale being the
5357      *       product of input and weights scales and zeroPoint equal to 0.
5358      *       Optional.
5359      *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5360      *       Shape: [numUnits]
5361      * * 13: The forget gate bias. Quantized with scale being the
5362      *       product of input and weights scales and zeroPoint equal to 0.
5363      *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5364      *       Shape: [numUnits]
5365      * * 14: The cell bias. Quantized with scale being the
5366      *       product of input and weights scales and zeroPoint equal to 0.
5367      *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5368      *       Shape: [numUnits]
5369      * * 15: The output gate bias. Quantized with scale being the
5370      *       product of input and weights scales and zeroPoint equal to 0.
5371      *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5372      *       Shape: [numUnits]
5373      * * 16: The projection weights. Optional.
5374      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5375      *       Shape: [outputSize, numUnits]
5376      * * 17: The projection bias. Quantized with scale being the
5377      *       product of input and weights scales and zeroPoint equal to 0.
5378      *       Optional.
5379      *       Type: {@link ANEURALNETWORKS_TENSOR_INT32}
5380      *       Shape: [outputSize]
5381      * * 18: The output from the previous time step.
5382      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5383      *       Shape: [batchSize, outputSize]
5384      * * 19: The cell state from the previous time step.
5385      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5386      *       Shape: [batchSize, numUnits]
5387      * * 20: The input layer normalization weights. Used to rescale
5388      *       normalized inputs to activation at input gate. Optional.
5389      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5390      *       Shape: [numUnits]
5391      * * 21: The forget layer normalization weights. Used to
5392      *       rescale normalized inputs to activation at forget gate. Optional.
5393      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5394      *       Shape: [numUnits]
5395      * * 22: The cell layer normalization weights. Used to rescale
5396      *       normalized inputs to activation at cell gate. Optional.
5397      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5398      *       Shape: [numUnits]
5399      * * 23: The output layer normalization weights. Used to
5400      *       rescale normalized inputs to activation at output gate. Optional.
5401      *       Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5402      *       Shape: [numUnits]
5403      * * 24: The cell clip. If provided the cell state is clipped
5404      *       by this value prior to the cell output activation. Optional.
5405      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5406      * * 25: The projection clip. If provided and projection is enabled,
5407      *       this is used for clipping the projected values. Optional.
5408      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5409      * * 26: The scale of the intermediate result of matmul,
5410      *       i.e. input to layer normalization, at input gate.
5411      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5412      * * 27: The scale of the intermediate result of matmul,
5413      *       i.e. input to layer normalization, at forget gate.
5414      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5415      * * 28: The scale of the intermediate result of matmul,
5416      *       i.e. input to layer normalization, at cell gate.
5417      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5418      * * 29: The scale of the intermediate result of matmul,
5419      *       i.e. input to layer normalization, at output gate.
5420      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5421      * * 30: The zero point of the hidden state, i.e. input to
5422      *       projection.
5423      *       Type: {@link ANEURALNETWORKS_INT32}.
5424      * * 31: The scale of the hidden state, i.e. input to
5425      *       projection.
5426      *       Type: {@link ANEURALNETWORKS_FLOAT32}.
5427      *
5428      * Outputs:
5429      * * 0: The output state (out).
5430      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5431      *      Shape: [batchSize, outputSize]
5432      * * 1: The cell state (out).
5433      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5434      *      Shape: [batchSize, numUnits]
5435      * * 2: The output. This is effectively the same as the current
5436      *      "output state (out)" value.
5437      *      Type: {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5438      *      Shape: [batchSize, outputSize]
5439      *
5440      * Available since NNAPI feature level 4.
5441      */
5442     ANEURALNETWORKS_QUANTIZED_LSTM = 95,
5443 
5444     /**
5445      * Executes one of the two referenced models as determined by a boolean
5446      * value.
5447      *
5448      * The inputs and outputs of the two referenced models must agree with the
5449      * signature of this operation. That is, if the operation has (3 + n) inputs
5450      * and m outputs, both models must have n inputs and m outputs with the same
5451      * types, ranks (if specified), dimensions (if specified), scales,
5452      * zeroPoints, and other operand parameters as the corresponding operation
5453      * inputs and outputs.
5454      *
5455      * Inputs:
5456      * * 0: A value of type {@link ANEURALNETWORKS_TENSOR_BOOL8} and shape [1]
5457      *      that determines which of the two referenced models to execute.
5458      *      The operand must have fully specified dimensions.
5459      * * 1: A {@link ANEURALNETWORKS_MODEL} reference to the model to be
5460      *      executed if the condition is true.
5461      * * 2: A {@link ANEURALNETWORKS_MODEL} reference to the model to be
5462      *      executed if the condition is false.
5463      * * 3 ~ (n + 2): Inputs to be passed to the model selected for execution.
5464      *
5465      * Outputs:
5466      * * 0 ~ (m - 1): Outputs produced by the selected model.
5467      *
5468      * Available since NNAPI feature level 4.
5469      */
5470     ANEURALNETWORKS_IF = 96,
5471 
5472     /**
5473      * Executes the body model until the condition model outputs false.
5474      *
5475      * The inputs to this operation are the condition model, the body model,
5476      * and operand values for the first iteration of the loop. The values are
5477      * implicitly split into three groups of input-output, state-only, and
5478      * input-only values, as described below.
5479      *
5480      * The outputs of this operation are the final values of input-output
5481      * operands.
5482      *
5483      * Both the condition and body model receive (m + k + n) inputs.
5484      * * The first m (m >= 1) inputs are input-output operands. For the first
5485      *   iteration, these are initialized from the corresponding inputs of the
5486      *   WHILE operation. In subsequent iterations, their values come from the
5487      *   corresponding outputs of the body model produced during the previous
5488      *   iteration.
5489      * * The next k (k >= 0) inputs are state-only operands. They are similar to
5490      *   the input-output operands, except that their values are no longer
5491      *   available after the loop terminates.
5492      * * The last n (n >= 0) inputs are input-only operands. Their values come
5493      *   from the corresponding inputs of the WHILE operation.
5494      *
5495      * The body model produces (m + k) outputs.
5496      * * The first m outputs are input-output operands. They become the outputs
5497      *   of the WHILE operation when a termination condition is reached.
5498      * * The last k outputs are state-only operands. Their values are no longer
5499      *   available after the loop terminates.
5500      *
5501      * The numbers m, k, and n are inferred by the runtime as follows:
5502      *     m = (WHILE operation output count)
5503      *     k = (body model output count) - m
5504      *     n = (body model input count) - m - k
5505      *
5506      * The pseudo-code below illustrates the flow of a WHILE operation with
5507      * inputs condition, body, initial_input_output, initial_state, input_only
5508      * (m = 1, k = 1, n = 1):
5509      *
5510      *     input_output = initial_input_output
5511      *     state = initial_state
5512      *     while condition(input_output, state, input_only):
5513      *         input_output, state = body(input_output, state, input_only)
5514      *     return input_output
5515      *
5516      * To prevent infinite loops, there is an implicit execution timeout
5517      * associated with each loop ("loop timeout duration"). See {@link
5518      * ANeuralNetworksExecution_setLoopTimeout}.
5519      *
5520      * Inputs:
5521      * * 0: A {@link ANEURALNETWORKS_MODEL} reference to the condition
5522      *      model. The model must have (m + k + n) inputs with
5523      *      the same types, ranks (if specified), dimensions (if specified),
5524      *      scales, zeroPoints, and other operand parameters as the
5525      *      corresponding inputs of the WHILE operation and exactly one output
5526      *      of {@link ANEURALNETWORKS_TENSOR_BOOL8} and shape [1].
5527      *      The output operand must have fully specified dimensions.
5528      * * 1: A {@link ANEURALNETWORKS_MODEL} reference to the body model.
5529      *      The model must have (m + k + n) inputs and (m + k) outputs with
5530      *      the same types, ranks (if specified), dimensions (if specified),
5531      *      scales, zeroPoints, and other operand parameters as the
5532      *      corresponding inputs and outputs of the WHILE operation.
5533      * * (m inputs): Initial values for input-output operands.
5534      * * (k inputs): Initial values for state-only operands.
5535      * * (n inputs): Values for input-only operands.
5536      *
5537      * Outputs:
5538      * * 0 ~ (m - 1): Outputs produced by the loop.
5539      *
5540      * Available since NNAPI feature level 4.
5541      */
5542     ANEURALNETWORKS_WHILE = 97,
5543 
5544     /**
5545      * Computes exponential linear activation on the input tensor element-wise.
5546      *
5547      * The output is calculated using the following formula:
5548      *
5549      *     ELU(x) = max(0, x) + min(0, alpha * (exp(x) - 1))
5550      *
5551      * Supported tensor {@link OperandCode}:
5552      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5553      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5554      *
5555      * Supported tensor rank: from 1.
5556      *
5557      * Inputs:
5558      * * 0: A tensor, specifying the input. May be zero-sized.
5559      * * 1: A scalar, specifying the alpha parameter.
5560      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16},
5561      *      the alpha value must be of {@link ANEURALNETWORKS_FLOAT16}.
5562      *      For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32},
5563      *      the alpha value must be of {@link ANEURALNETWORKS_FLOAT32}.
5564      *
5565      * Outputs:
5566      * * 0: The output tensor of same shape and type as input0.
5567      *
5568      * Available since NNAPI feature level 4.
5569      */
5570     ANEURALNETWORKS_ELU = 98,
5571 
5572     /**
5573      * Computes hard-swish activation on the input tensor element-wise.
5574      *
5575      * Hard swish activation is introduced in
5576      * https://arxiv.org/pdf/1905.02244.pdf
5577      *
5578      * The output is calculated using the following formula:
5579      *
5580      *     h-swish(x) = x * max(0, min(6, (x + 3))) / 6
     *
5582      * Supported tensor {@link OperandCode}:
5583      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5584      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5585      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5586      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5587      *
5588      * Supported tensor rank: from 1.
5589      *
5590      * Inputs:
5591      * * 0: A tensor, specifying the input. May be zero-sized.
5592      *
5593      * Outputs:
5594      * * 0: The output tensor of same shape and type as input0.
5595      *      Scale and zero point of this tensor may be different from the input
5596      *      tensor's parameters.
5597      *
5598      * Available since NNAPI feature level 4.
5599      */
5600     ANEURALNETWORKS_HARD_SWISH = 99,
5601 
5602     /**
5603      * Creates a tensor filled with a scalar value.
5604      *
5605      * Supported output tensor {@link OperandCode}:
5606      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5607      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5608      * * {@link ANEURALNETWORKS_TENSOR_INT32}
5609      *
5610      * Supported tensor rank: from 1.
5611      *
5612      * Inputs:
5613      * * 0: A 1-D tensor, specifying the desired output tensor shape.
5614      * * 1: A scalar, specifying the value to fill the output tensors with.
5615      *      For output tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16},
5616      *      the scalar must be of {@link ANEURALNETWORKS_FLOAT16}.
5617      *      For output tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32},
5618      *      the scalar must be of {@link ANEURALNETWORKS_FLOAT32}.
5619      *      For output tensor of {@link ANEURALNETWORKS_TENSOR_INT32},
5620      *      the scalar must be of {@link ANEURALNETWORKS_INT32}.
5621      *
5622      * Outputs:
5623      * * 0: The output tensor.
5624      *
5625      * Available since NNAPI feature level 4.
5626      */
5627     ANEURALNETWORKS_FILL = 100,
5628 
5629     /**
5630      * Returns the rank of a tensor.
5631      *
5632      * The rank of a tensor is the number of dimensions in it. Also known as
5633      * "order", "degree", "ndims".
5634      *
5635      * Supported tensor {@link OperandCode}:
5636      * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
5637      * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
5638      * * {@link ANEURALNETWORKS_TENSOR_INT32}
5639      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
5640      * * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
5641      * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
5642      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
5643      * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}
5644      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM}
5645      * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED}
5646      *
5647      * Supported tensor rank: from 1.
5648      *
5649      * Inputs:
5650      * * 0: The input tensor.
5651      *
5652      * Outputs:
5653      * * 0: A scalar of {@link ANEURALNETWORKS_INT32}, specifying the rank
5654      *      of the input tensor.
5655      *
5656      * Available since NNAPI feature level 4.
5657      */
5658     ANEURALNETWORKS_RANK = 101,
5659 } OperationCode;
5660 
5661 /**
5662  * Fused activation function types.
5663  *
5664  * Available since NNAPI feature level 1.
5665  */
typedef enum {
    /** No fused activation function: output = input. */
    ANEURALNETWORKS_FUSED_NONE = 0,
    /** Fused ReLU activation function: output = max(0, input). */
    ANEURALNETWORKS_FUSED_RELU = 1,
    /** Fused ReLU1 activation function: output = min(1, max(-1, input)). */
    ANEURALNETWORKS_FUSED_RELU1 = 2,
    /** Fused ReLU6 activation function: output = min(6, max(0, input)). */
    ANEURALNETWORKS_FUSED_RELU6 = 3,
} FuseCode;
5676 
5677 /**
5678  * Implicit padding algorithms.
5679  *
5680  *
5681  * Available since NNAPI feature level 1.
5682  */
typedef enum {
    /**
     * SAME padding.
     * Padding on both ends is the "same":
     *     padding_to_beginning = total_padding / 2
     *     padding_to_end       = (total_padding + 1) / 2
     * i.e., for an even amount of total padding, both ends receive exactly
     * the same padding; for an odd amount, the padding at the end is bigger
     * than the padding at the beginning by 1.
     *
     * total_padding is a function of input, stride, dilation and filter size.
     * It could be computed as follows:
     *    out_size = (input + stride - 1) / stride
     *    effective_filter_size = (filter_size - 1) * dilation + 1
     *    needed_input = (out_size - 1) * stride + effective_filter_size
     *    total_padding = max(0, needed_input - input_size)
     * The computation is the same for the horizontal and vertical directions.
     */
    ANEURALNETWORKS_PADDING_SAME = 1,

    /**
     * VALID padding.
     * No padding. When the input size is not evenly divisible by
     * the filter size, the input at the end that could not fill
     * the whole filter tile will simply be ignored.
     */
    ANEURALNETWORKS_PADDING_VALID = 2,
} PaddingCode;
5711 
5712 /**
5713  * Execution preferences.
5714  *
5715  * Available since NNAPI feature level 1.
5716  */
typedef enum {
    /**
     * Prefer executing in a way that minimizes battery drain.
     * This is desirable for compilations that will be executed often.
     */
    ANEURALNETWORKS_PREFER_LOW_POWER = 0,
    /**
     * Prefer returning a single answer as fast as possible (minimize
     * latency), even if this causes more power consumption.
     */
    ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1,
    /**
     * Prefer maximizing the throughput of successive frames, for example
     * when processing successive frames coming from the camera.
     */
    ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2,
} PreferenceCode;
5734 
5735 /**
5736  * Device types.
5737  *
5738  * The type of NNAPI device.
5739  */
typedef enum {
    /** The device type cannot be provided. */
    ANEURALNETWORKS_DEVICE_UNKNOWN = 0,
    /** The device does not fall into any category below. */
    ANEURALNETWORKS_DEVICE_OTHER = 1,
    /** The device runs NNAPI models on single or multi-core CPU. */
    ANEURALNETWORKS_DEVICE_CPU = 2,
    /**
     * The device can run NNAPI models and also accelerate graphics APIs such
     * as OpenGL ES and Vulkan.
     */
    ANEURALNETWORKS_DEVICE_GPU = 3,
    /** Dedicated accelerator for Machine Learning workloads. */
    ANEURALNETWORKS_DEVICE_ACCELERATOR = 4,
} DeviceTypeCode;
5753 
5754 /**
5755  * NNAPI feature levels.
5756  *
5757  * Each update of the NNAPI specification yields a new NNAPI feature level enum value.
 * NNAPI feature level corresponds to an NNAPI specification version that a driver
5759  * and/or the NNAPI runtime can implement.
5760  *
5761  * A feature level up to and including "FEATURE_LEVEL_5" maps directly to
5762  * the Android API level that introduced the corresponding update of the NNAPI
5763  * specification. Feature levels after Android API level 31 have no association with
5764  * API level because the NNAPI specification can be updated between Android API
5765  * releases. Outputs of {@link ANeuralNetworksDevice_getFeatureLevel} and
5766  * {@link ANeuralNetworks_getRuntimeFeatureLevel} must be compared against
5767  * these enum values instead of the Android API level.
5768  */
typedef enum {
    // Note: each enumerator value below equals the Android API level that
    // introduced the corresponding NNAPI specification update (see the
    // documentation block above).
    /** NNAPI specification available in Android O-MR1, Android NNAPI feature level 1 */
    ANEURALNETWORKS_FEATURE_LEVEL_1 = 27,
    /** NNAPI specification available in Android P, Android NNAPI feature level 2 */
    ANEURALNETWORKS_FEATURE_LEVEL_2 = 28,
    /** NNAPI specification available in Android Q, Android NNAPI feature level 3 */
    ANEURALNETWORKS_FEATURE_LEVEL_3 = 29,
    /** NNAPI specification available in Android R, Android NNAPI feature level 4 */
    ANEURALNETWORKS_FEATURE_LEVEL_4 = 30,
    /**
     * NNAPI specification available in Android S, Android NNAPI feature level 5.
     * After Android S, the NNAPI specification can be updated between Android
     * API releases.
     */
    ANEURALNETWORKS_FEATURE_LEVEL_5 = 31,
} FeatureLevelCode;
5785 
5786 /**
5787  * Result codes.
5788  *
5789  * <p>Any NNAPI function can return any result code, including result codes not
5790  * currently documented. Any value other than {@link ANEURALNETWORKS_NO_ERROR}
5791  * indicates a failure of some kind.</p>
5792  *
5793  * <p>Additional information about the nature of a failure can be obtained from
5794  * the device log after enabling NNAPI debugging by setting the debug.nn.vlog
5795  * property to 1, e.g., by calling "adb shell setprop debug.nn.vlog 1".</p>
5796  *
5797  * Available since NNAPI feature level 1.
5798  */
typedef enum {
    /**
     * Operation was successful.
     */
    ANEURALNETWORKS_NO_ERROR = 0,

    /**
     * Failure caused by not enough available memory.
     */
    ANEURALNETWORKS_OUT_OF_MEMORY = 1,

    /**
     * NOTE(review): this enumerator carries no description in this header;
     * the name suggests an operation that has not been completed — confirm
     * the intended semantics against the NNAPI reference documentation
     * before relying on this wording.
     */
    ANEURALNETWORKS_INCOMPLETE = 2,

    /**
     * Failure caused by unexpected null argument.
     */
    ANEURALNETWORKS_UNEXPECTED_NULL = 3,

    /**
     * Failure caused by invalid function arguments, invalid model definition,
     * invalid execution definition or invalid data at execution time.
     */
    ANEURALNETWORKS_BAD_DATA = 4,

    /**
     * Failure caused by failed model execution.
     */
    ANEURALNETWORKS_OP_FAILED = 5,

    /**
     * Failure caused by object being in the wrong state.
     */
    ANEURALNETWORKS_BAD_STATE = 6,

    /**
     * Failure caused by not being able to map a file into memory.
     * This may be caused by a file descriptor not being mappable, or an AHardwareBuffer
     * not supported by the device.
     * Mitigate by reading its content into memory.
     */
    ANEURALNETWORKS_UNMAPPABLE = 7,

    /**
     * Failure caused by insufficient buffer size provided to a model output.
     */
    ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE = 8,

    /**
     * Failure caused by a device not being available.
     */
    ANEURALNETWORKS_UNAVAILABLE_DEVICE = 9,

    /**
     * Failure because a deadline could not be met for a task, but future
     * deadlines may still be met for the same task after a short delay.
     *
     * Available since NNAPI feature level 4.
     */
    ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT = 10,

    /**
     * Failure because a deadline could not be met for a task, and future
     * deadlines will likely also not be met for the same task even after a
     * short delay.
     *
     * Available since NNAPI feature level 4.
     */
    ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT = 11,

    /**
     * Failure because of a resource limitation within the driver, but future
     * calls for the same task may still succeed after a short delay.
     *
     * Available since NNAPI feature level 4.
     */
    ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT = 12,

    /**
     * Failure because of a resource limitation within the driver, and future
     * calls for the same task will likely also fail even after a short
     * delay.
     *
     * Available since NNAPI feature level 4.
     */
    ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT = 13,

    /**
     * Failure indicating an object is in a dead state.
     *
     * Available since NNAPI feature level 4.
     */
    ANEURALNETWORKS_DEAD_OBJECT = 14,
} ResultCode;
5892 
5893 /**
5894  * For {@link ANeuralNetworksModel_setOperandValue}, values with a
5895  * length smaller or equal to this will be immediately copied into
5896  * the model. The size is in bytes.
5897  *
5898  * Available since NNAPI feature level 1.
5899  */
enum { ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES = 128 };  // threshold in bytes; see doc above
5901 
5902 /**
5903  * For {@link ANeuralNetworksCompilation_setCaching}, specify the size
5904  * of the cache token required from the application. The size is in bytes.
5905  *
5906  * Available since NNAPI feature level 3.
5907  */
enum { ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN = 32 };  // cache token length in bytes
5909 
5910 /**
5911  * Different duration measurements.
5912  *
5913  * Durations are measured in nanoseconds.
5914  *
5915  * Available since NNAPI feature level 3.
5916  */
typedef enum {
    // Comments below use /** */ doc blocks so the {@link} tags are picked up
    // by the documentation generator; the enumerators are unchanged.
    /** Execution time on hardware (not driver, which runs on host processor). */
    ANEURALNETWORKS_DURATION_ON_HARDWARE = 0,
    /**
     * Execution time in driver (including time on hardware).  Excludes overhead
     * such as that of the runtime itself and the IPC needed for the runtime to
     * communicate with the driver.
     */
    ANEURALNETWORKS_DURATION_IN_DRIVER = 1,
    /**
     * Execution time on hardware, after all dependencies have been signaled.
     * If no dependencies specified (for example, if the execution was scheduled other
     * than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
     * reported time will be the same as ANEURALNETWORKS_DURATION_ON_HARDWARE.
     * Available since NNAPI feature level 4.
     */
    ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE = 2,
    /**
     * Execution time in driver, after all dependencies have been signaled. Excludes
     * overhead such as that of the runtime itself and the IPC needed for the runtime
     * to communicate with the driver.
     * If no dependencies specified (for example, if the execution was scheduled other
     * than with {@link ANeuralNetworksExecution_startComputeWithDependencies}), the
     * reported time will be the same as ANEURALNETWORKS_DURATION_IN_DRIVER.
     * Available since NNAPI feature level 4.
     */
    ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER = 3,
} DurationCode;
5939 
5940 /**
5941  * Relative execution priority.
5942  *
5943  * Available since NNAPI feature level 4.
5944  */
typedef enum {
    /** Relatively low execution priority. */
    ANEURALNETWORKS_PRIORITY_LOW = 90,
    /** Medium execution priority. */
    ANEURALNETWORKS_PRIORITY_MEDIUM = 100,
    /** Relatively high execution priority. */
    ANEURALNETWORKS_PRIORITY_HIGH = 110,
    /** The priority used when none is explicitly requested. */
    ANEURALNETWORKS_PRIORITY_DEFAULT = ANEURALNETWORKS_PRIORITY_MEDIUM,
} PriorityCode;
5951 
5952 /**
5953  * ANeuralNetworksMemory is an opaque type that represents memory.
5954  *
5955  * This type is used to represent shared memory, memory mapped files,
5956  * and similar memories.
5957  *
5958  * By using shared memory, a program can efficiently communicate to the
5959  * runtime and drivers the tensors that define a model. See
5960  * {@link ANeuralNetworksModel_setOperandValueFromMemory}. An application
5961  * should typically create one shared memory object that contains every constant tensor
5962  * needed to define a model. {@link ANeuralNetworksMemory_createFromFd} can be used to
5963  * create shared memory from a file handle.
5964  * {@link ANeuralNetworksMemory_createFromAHardwareBuffer} can be used to
5965  * create shared memory from an AHardwareBuffer handle.
5966  *
5967  * Memory objects can also be used to specify the input and output arguments of
5968  * an execution. See {@link ANeuralNetworksExecution_setInputFromMemory}
5969  * and {@link ANeuralNetworksExecution_setOutputFromMemory}.
5970  *
5971  * When calling {@link ANeuralNetworksModel_setOperandValueFromMemory},
5972  * {@link ANeuralNetworksExecution_setInputFromMemory} and
5973  * {@link ANeuralNetworksExecution_setOutputFromMemory}, each operand in the shared
5974  * memory object must be aligned on a boundary of a byte size that is a multiple
5975  * of the element type byte size, e.g., a tensor with
5976  * {@link ANEURALNETWORKS_TENSOR_FLOAT32} type must be aligned on 4-byte boundary.
5977  *
5978  * It is the application's responsibility to ensure that there are no uses of
5979  * the memory after calling {@link ANeuralNetworksMemory_free}. This includes
5980  * any model which references this memory because of a call to
5981  * {@link ANeuralNetworksModel_setOperandValueFromMemory}, any compilation
5982  * created using such a model, any execution object or burst object created
5983  * using such a compilation, or any execution which references this memory
5984  * because of a call to {@link ANeuralNetworksExecution_setInputFromMemory} or
5985  * {@link ANeuralNetworksExecution_setOutputFromMemory}.
5986  *
5987  * Available since NNAPI feature level 1.
5988  *
5989  * Starting at NNAPI feature level 4, the application may request creation of device native memory
5990  * from {@link ANeuralNetworksMemoryDesc} to avoid potential memory copying and transformation
5991  * overhead between executions. See also {@link ANeuralNetworksMemoryDesc} and
5992  * {@link ANeuralNetworksMemory_createFromDesc}.
5993  */
typedef struct ANeuralNetworksMemory ANeuralNetworksMemory;  // Opaque handle; layout intentionally hidden (stable NDK ABI).
5995 
5996 /**
5997  * ANeuralNetworksModel is an opaque type that contains a description of the
5998  * mathematical operations that constitute the model.
5999  *
6000  * <p>Build the model by calling<ul>
6001  * <li>{@link ANeuralNetworksModel_create}</li>
6002  * <li>{@link ANeuralNetworksModel_addOperation}</li>
6003  * <li>{@link ANeuralNetworksModel_addOperand}</li>
6004  * </ul>
6005  *
6006  * This forms a graph in which each operation and operand is a node, a
6007  * directed edge from an operand to an operation indicates that the
6008  * operand is an input to the operation, and a directed edge from an
6009  * operation to an operand indicates that the operand is an output
6010  * from the operation. This graph must be acyclic.
6011  *
6012  * A model is completed by calling {@link ANeuralNetworksModel_finish}.
6013  * A model is destroyed by calling {@link ANeuralNetworksModel_free}.
6014  *
6015  * <p>A model cannot be modified once {@link ANeuralNetworksModel_finish}
6016  * has been called on it.</p>
6017  *
6018  * <p>It is the application's responsibility to make sure that only one thread
6019  * modifies a model at a given time. It is however safe for more than one
6020  * thread to use the model once {@link ANeuralNetworksModel_finish} has returned.</p>
6021  *
6022  * <p>It is also the application's responsibility to ensure that there are no
6023  * other uses of the model after calling {@link ANeuralNetworksModel_free}.
6024  * This includes any compilation, execution object or burst object created using
6025  * the model.</p>
6026  *
6027  * Available since NNAPI feature level 1.
6028  */
typedef struct ANeuralNetworksModel ANeuralNetworksModel;  // Opaque handle; layout intentionally hidden (stable NDK ABI).
6030 
6031 /**
6032  * ANeuralNetworksCompilation is an opaque type that can be used to compile
6033  * a machine learning model.
6034  *
6035  * <p>To use:<ul>
6036  *    <li>Create a new compilation instance by calling the
6037  *        {@link ANeuralNetworksCompilation_create} function or
6038  *        {@link ANeuralNetworksCompilation_createForDevices}.</li>
6039  *    <li>Set any desired properties on the compilation (for example,
6040  *        {@link ANeuralNetworksCompilation_setPreference}).</li>
6041  *    <li>Optionally, set the caching signature and the cache directory on the
6042  *        compilation by calling {@link ANeuralNetworksCompilation_setCaching}.</li>
6043  *    <li>Complete the compilation with {@link ANeuralNetworksCompilation_finish}.</li>
6044  *    <li>Use the compilation as many times as needed
6045  *        with {@link ANeuralNetworksExecution_create} and
6046  *        {@link ANeuralNetworksBurst_create}.</li>
6047  *    <li>Destroy the compilation with {@link ANeuralNetworksCompilation_free}
6048  *        once all executions using the compilation have completed.</li></ul></p>
6049  *
6050  * A compilation is completed by calling {@link ANeuralNetworksCompilation_finish}.
6051  * A compilation is destroyed by calling {@link ANeuralNetworksCompilation_free}.
6052  *
6053  * <p>A compilation cannot be modified once {@link ANeuralNetworksCompilation_finish}
6054  * has been called on it.</p>
6055  *
6056  * <p>It is the application's responsibility to make sure that only
6057  * one thread modifies a compilation at a given time. It is however
6058  * safe for more than one thread to use the compilation once
6059  * {@link ANeuralNetworksCompilation_finish} has returned.</p>
6060  *
6061  * <p>It is also the application's responsibility to ensure that there are no other
6062  * uses of the compilation after calling {@link ANeuralNetworksCompilation_free}.
6063  * This includes any execution object or burst object created using the compilation,
6064  * or any memory descriptor with the compilation as part of one of the roles specified by
6065  * {@link ANeuralNetworksMemoryDesc_addInputRole} or
6066  * {@link ANeuralNetworksMemoryDesc_addOutputRole}.</p>
6067  *
6068  * Available since NNAPI feature level 1.
6069  */
typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation;  // Opaque handle; layout intentionally hidden (stable NDK ABI).
6071 
6072 /**
6073  * ANeuralNetworksExecution is an opaque type that can be used to apply a machine
6074  * learning model to a set of inputs.
6075  *
6076  * <p>To use:<ul>
6077  *    <li>Create a new execution instance by calling the
6078  *        {@link ANeuralNetworksExecution_create} function.</li>
6079  *    <li>Associate input buffers or memory regions to the model inputs with
6080  *        {@link ANeuralNetworksExecution_setInput} or
6081  *        {@link ANeuralNetworksExecution_setInputFromMemory}.</li>
6082  *    <li>Associate output buffers or memory regions to the model outputs with
6083  *        {@link ANeuralNetworksExecution_setOutput} or
6084  *        {@link ANeuralNetworksExecution_setOutputFromMemory}.</li>
6085  *    <li>Optionally, configure the execution with
6086  *        {@link ANeuralNetworksExecution_setLoopTimeout},
6087  *        {@link ANeuralNetworksExecution_setMeasureTiming},
6088  *        {@link ANeuralNetworksExecution_setReusable}, or
 *        {@link ANeuralNetworksExecution_setTimeout}.</li>
6090  *    <li>Apply the model with one of the following:</li><ul>
6091  *        <li>Asynchronously with {@link ANeuralNetworksExecution_startCompute}
6092  *            or with {@link ANeuralNetworksExecution_startComputeWithDependencies},
6093  *            waiting for the execution to complete with
6094  *            {@link ANeuralNetworksEvent_wait}.</li>
6095  *        <li>Synchronously with {@link ANeuralNetworksExecution_compute}.</li>
6096  *        <li>Synchronously as part of an execution burst with
6097  *            {@link ANeuralNetworksExecution_burstCompute}.</li></ul>
6098  *        If the execution has been marked as reusable, then you can
6099  *        apply the model more than once.
6100  *    <li>Destroy the execution with
6101  *        {@link ANeuralNetworksExecution_free}.</li></ul></p>
6102  *
6103  * <p>An output buffer or memory region must not overlap with any
6104  * other output buffer or memory region, with an input buffer or
6105  * memory region, or with an operand value in a memory object
6106  * ({@link ANeuralNetworksModel_setOperandValueFromMemory}).</p>
6107  *
6108  * <p>An execution is in the preparation state after it is created by
6109  * {@link ANeuralNetworksExecution_create}. An execution may only be modified in the preparation
6110  * state. Scheduling a computation by calling {@link ANeuralNetworksExecution_burstCompute},
6111  * {@link ANeuralNetworksExecution_compute}, {@link ANeuralNetworksExecution_startCompute},
6112  * or {@link ANeuralNetworksExecution_startComputeWithDependencies} will change the state of
6113  * the execution object to the computation state. When the computation completes, the state of
6114  * the execution object will change from the computation state to the completed state.
6115  * The computation is completed when {@link ANeuralNetworksExecution_compute},
6116  * {@link ANeuralNetworksExecution_burstCompute}, or {@link ANeuralNetworksEvent_wait}
6117  * has returned.</p>
6118  *
6119  * <p>An execution can be applied to a model with
6120  * {@link ANeuralNetworksExecution_burstCompute},
6121  * {@link ANeuralNetworksExecution_compute},
6122  * {@link ANeuralNetworksExecution_startCompute} or
6123  * {@link ANeuralNetworksExecution_startComputeWithDependencies} only once. Create new
6124  * executions to do new evaluations of the model.</p>
6125  *
6126  * <p>Starting at NNAPI feature level 5, the application may call
6127  * {@link ANeuralNetworksExecution_setReusable} to set an execution to be reusable for multiple
6128  * computations. The application may schedule and evaluate a computation again from the completed
6129  * state of a reusable execution. The execution cannot be modified between computations.</p>
6130  *
6131  * <p>It is the application's responsibility to make sure that only one thread
6132  * modifies an execution at a given time. It is however safe for more than one
6133  * thread to use {@link ANeuralNetworksEvent_wait} at the same time.</p>
6134  *
6135  * <p>It is also the application's responsibility to ensure that the execution
6136  * either has never been scheduled or has completed (i.e., that
6137  * {@link ANeuralNetworksExecution_burstCompute},
6138  * {@link ANeuralNetworksExecution_compute}, or
6139  * {@link ANeuralNetworksEvent_wait} has returned) before calling
 * {@link ANeuralNetworksExecution_free}.</p>
6141  *
6142  * <p>It is also the application's responsibility to ensure that there are no other
6143  * uses of the execution after calling {@link ANeuralNetworksExecution_free}.</p>
6144  *
6145  * <p>It is the application's responsibility to ensure that there are no concurrent computations
6146  * scheduled and evaluated on the same execution, either by means of
6147  * {@link ANeuralNetworksExecution_compute} or
6148  * {@link ANeuralNetworksExecution_burstCompute} (which are synchronous)
6149  * in different threads, or by means of
6150  * {@link ANeuralNetworksExecution_startCompute} or
6151  * {@link ANeuralNetworksExecution_startComputeWithDependencies} (which are asynchronous).
6152  * It is however safe to schedule and evaluate multiple computations on different executions
6153  * concurrently. (Concurrent uses of {@link ANeuralNetworksExecution_burstCompute} must be on
6154  * different burst objects.) The runtime makes no guarantee on the ordering of
6155  * completion of executions. If it's important to the application, the
6156  * application should enforce the ordering by ensuring that one execution
6157  * completes before the next is scheduled (for example, by scheduling all
6158  * executions synchronously within a single thread, or by scheduling all
6159  * executions asynchronously and using {@link ANeuralNetworksEvent_wait} between
6160  * calls to {@link ANeuralNetworksExecution_startCompute}); or by using
6161  * {@link ANeuralNetworksExecution_startComputeWithDependencies} to make the execution wait for a
6162  * list of events to be signaled before starting the actual evaluation.</p>
6163  *
6164  * Available since NNAPI feature level 1.
6165  */
typedef struct ANeuralNetworksExecution ANeuralNetworksExecution;  // Opaque handle; layout intentionally hidden (stable NDK ABI).
6167 
6168 /**
6169  * Parameters for ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL operand.
6170  */
typedef struct ANeuralNetworksSymmPerChannelQuantParams {
    // Layout of this struct is part of the frozen NDK ABI; do not reorder,
    // add, or resize fields (see the stability notice at the top of the file).
    /** The index of the channel dimension. */
    uint32_t channelDim;
    /** The size of the scale array. Should be equal to dimension[channelDim] of the Operand. */
    uint32_t scaleCount;
    /** The array of scaling values for each channel. Each value must be greater than zero. */
    const float* scales;
} ANeuralNetworksSymmPerChannelQuantParams;
6179 
/**
 * ANeuralNetworksBurst is an opaque type that can be used to reduce the latency
 * of a rapid sequence of executions. It will likely cause overhead if only used
 * for a single execution.
 *
 * ANeuralNetworksBurst serves as a context object for any number of inferences
 * using {@link ANeuralNetworksExecution} objects. An ANeuralNetworksBurst
 * object and the {@link ANeuralNetworksExecution} objects used with it must all
 * have been created from the same {@link ANeuralNetworksCompilation} object.
 *
 * This object is also used as a hint to drivers, providing insight to the
 * lifetime of a rapid sequence of executions. For example, a driver may choose
 * to increase the clock frequency of its accelerator for the lifetime of a
 * burst object.
 *
 * <p>To use:<ul>
 *    <li>Create a new burst object by calling the
 *        {@link ANeuralNetworksBurst_create} function.</li>
 *    <li>For each execution:</li><ul>
 *        <li>Create {@link ANeuralNetworksExecution} and configure its
 *            properties (see {@link ANeuralNetworksExecution} for details).</li>
 *        <li>Apply the model synchronously with
 *            {@link ANeuralNetworksExecution_burstCompute}, reusing the same
 *            {@link ANeuralNetworksBurst} with the new
 *            {@link ANeuralNetworksExecution}.</li>
 *        <li>Use and free the {@link ANeuralNetworksExecution}.</li></ul>
 *    <li>Destroy the burst with
 *        {@link ANeuralNetworksBurst_free}.</li></ul></p>
 *
 * Note: concurrent calls to {@link ANeuralNetworksExecution_burstCompute} must
 * be made on different burst objects (see {@link ANeuralNetworksExecution}).
 *
 * Available since NNAPI feature level 3.
 */
typedef struct ANeuralNetworksBurst ANeuralNetworksBurst;
6212 
/**
 * ANeuralNetworksOperandType describes the type of an operand.
 *
 * This structure is used to describe both scalars and tensors.
 *
 * A tensor operand type with all dimensions specified is "fully
 * specified".  Whenever possible (i.e., whenever the dimensions are
 * known at model construction time), a tensor operand type should be
 * (but is not required to be) fully specified, in order to enable the
 * best possible performance.
 *
 * If a tensor operand's type is not fully specified, the dimensions
 * of the operand are deduced from the operand types and values of the
 * operation for which that operand is an output or from the corresponding
 * {@link ANEURALNETWORKS_IF} or {@link ANEURALNETWORKS_WHILE} operation input
 * operand type in the case of referenced model input operands.
 *
 * <p>In the following situations, a tensor operand type must be fully
 * specified:<ul>
 *     <li>The operand has a constant value, set by
 *         {@link ANeuralNetworksModel_setOperandValue} (with a
 *         non-nullptr buffer) or
 *         {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li>
 *     <li>The operand is a model input (see
 *         {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main
 *         model within a compilation.  A fully specified tensor operand type
 *         must either be provided to {@link ANeuralNetworksModel_addOperand};
 *         or it must be provided to the corresponding
 *         {@link ANeuralNetworksExecution_setInput}, or
 *         {@link ANeuralNetworksExecution_setInputFromMemory}.
 *         EXCEPTION: If the input is optional and omitted
 *         (by passing nullptr for buffer to
 *         {@link ANeuralNetworksExecution_setInput}) then it need
 *         not have a fully specified tensor operand type.</li>
 *     <li>The operand is a model output (see
 *         {@link ANeuralNetworksModel_identifyInputsAndOutputs}) of the main
 *         model within a compilation and is to be used with {@link
 *         ANeuralNetworksExecution_startComputeWithDependencies}.
 *         A fully specified tensor operand type must either be provided
 *         to {@link ANeuralNetworksModel_addOperand}; or it must be
 *         provided to the corresponding
 *         {@link ANeuralNetworksExecution_setOutput}, or
 *         {@link ANeuralNetworksExecution_setOutputFromMemory}.</li></ul>
 *
 * A tensor operand type of specified rank but some number of
 * unspecified dimensions is represented by setting dimensionCount to
 * the rank and each unspecified dimension to 0.
 *
 * Available since NNAPI feature level 1.
 *
 * Starting at NNAPI feature level 3, a tensor operand type of unspecified rank is
 * represented by setting dimensionCount to 0 and dimensions to NULL (just as if
 * it were a scalar operand type).
 */
typedef struct ANeuralNetworksOperandType {
    /**
     * The data type, e.g., ANEURALNETWORKS_FLOAT32.
     *
     * Takes values from {@link OperandCode}.
     */
    int32_t type;

    /**
     * The number of dimensions (rank).
     *
     * Must be 0 for scalars.
     */
    uint32_t dimensionCount;

    /**
     * The dimensions of the tensor.
     *
     * Must be nullptr for scalars.
     *
     * NOTE(review): the struct stores only the pointer (dimensionCount
     * elements); presumably the array must remain valid for the duration of
     * the call that consumes this struct — confirm against the consuming
     * API's documentation.
     */
    const uint32_t* dimensions;

    /**
     * The quantization scale.
     *
     * Must be 0 when not applicable to an operand type.
     *
     * See {@link OperandCode}.
     */
    float scale;

    /**
     * The quantization zero point.
     *
     * Must be 0 when not applicable to an operand type.
     *
     * See {@link OperandCode}.
     */
    int32_t zeroPoint;
} ANeuralNetworksOperandType;
6305 
/**
 * Aliasing to {@link OperationCode}, used in function
 * {@link ANeuralNetworksModel_addOperation}.
 *
 * Declared as a plain int32_t (rather than the enum type) so that new
 * operation codes can be added in future releases without changing the ABI.
 */
typedef int32_t ANeuralNetworksOperationType;
6311 
/**
 * ANeuralNetworksEvent is an opaque type that represents an event
 * that will be signaled once an execution completes.
 *
 * Available since NNAPI feature level 1.
 */
typedef struct ANeuralNetworksEvent ANeuralNetworksEvent;
6319 
/**
 * ANeuralNetworksDevice is an opaque type that represents a device.
 *
 * This type is used to query basic properties and supported operations of the corresponding
 * device, and control which device(s) a model is to be run on.
 *
 * Available since NNAPI feature level 3.
 */
typedef struct ANeuralNetworksDevice ANeuralNetworksDevice;
6329 
/**
 * ANeuralNetworksMemoryDesc is an opaque type that represents a memory descriptor.
 *
 * A memory descriptor describes the properties of a memory object, and is used by
 * {@link ANeuralNetworksMemory_createFromDesc}.
 *
 * To use:
 *   - Create a new memory descriptor by calling {@link ANeuralNetworksMemoryDesc_create}.
 *   - Specify all of the intended input and output roles by calling
 *     {@link ANeuralNetworksMemoryDesc_addInputRole} and
 *     {@link ANeuralNetworksMemoryDesc_addOutputRole}.
 *   - Optionally, specify the memory dimensions by calling
 *     {@link ANeuralNetworksMemoryDesc_setDimensions}.
 *   - Complete the memory descriptor with {@link ANeuralNetworksMemoryDesc_finish}.
 *   - Use the memory descriptor as many times as needed with
 *     {@link ANeuralNetworksMemory_createFromDesc} (i.e., a single descriptor
 *     can be used to create multiple memory objects).
 *   - Destroy the memory descriptor with {@link ANeuralNetworksMemoryDesc_free}.
 *
 * A memory descriptor is completed by calling {@link ANeuralNetworksMemoryDesc_finish}.
 * A memory descriptor is destroyed by calling {@link ANeuralNetworksMemoryDesc_free}.
 *
 * A memory descriptor must not be modified once {@link ANeuralNetworksMemoryDesc_finish}
 * has been called on it.
 *
 * It is the application's responsibility to make sure that only
 * one thread modifies a memory descriptor at a given time. It is however
 * safe for more than one thread to use the memory descriptor once
 * {@link ANeuralNetworksMemoryDesc_finish} has returned.
 *
 * It is also the application's responsibility to ensure that there are no other
 * uses of the memory descriptor after calling {@link ANeuralNetworksMemoryDesc_free}.
 * It is however safe to continue using a {@link ANeuralNetworksMemory} object created
 * from the memory descriptor.
 *
 * Available since NNAPI feature level 4.
 */
typedef struct ANeuralNetworksMemoryDesc ANeuralNetworksMemoryDesc;
6367 
6368 __END_DECLS
6369 
6370 #endif  // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_TYPES_H
6371 
6372 /** @} */
6373