1 /* 2 * Copyright (C) 2017 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 /** 18 * @addtogroup NeuralNetworks 19 * @{ 20 */ 21 22 /** 23 * @file NeuralNetworks.h 24 */ 25 26 #ifndef ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_H 27 #define ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_H 28 29 /****************************************************************** 30 * 31 * IMPORTANT NOTICE: 32 * 33 * This file is part of Android's set of stable system headers 34 * exposed by the Android NDK (Native Development Kit). 35 * 36 * Third-party source AND binary code relies on the definitions 37 * here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES. 38 * 39 * - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES) 40 * - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS 41 * - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY 42 * - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES 43 */ 44 45 #include <android/hardware_buffer.h> 46 #include <stddef.h> 47 #include <stdint.h> 48 #include <sys/cdefs.h> 49 50 __BEGIN_DECLS 51 52 /** 53 * Operand types. 54 * 55 * The type of operands that can be added to a model. 56 * 57 * Although we define many types, most operators accept just a few 58 * types. Most used are {@link ANEURALNETWORKS_TENSOR_FLOAT32}, 59 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, 60 * and {@link ANEURALNETWORKS_INT32}. 61 * 62 * Available since API level 27. 63 */ 64 typedef enum { 65 /** A 32 bit floating point scalar value. */ 66 ANEURALNETWORKS_FLOAT32 = 0, 67 /** A signed 32 bit integer scalar value. */ 68 ANEURALNETWORKS_INT32 = 1, 69 /** An unsigned 32 bit integer scalar value. */ 70 ANEURALNETWORKS_UINT32 = 2, 71 /** A tensor of 32 bit floating point values. */ 72 ANEURALNETWORKS_TENSOR_FLOAT32 = 3, 73 /** A tensor of 32 bit integer values. */ 74 ANEURALNETWORKS_TENSOR_INT32 = 4, 75 /** 76 * A tensor of 8 bit unsigned integers that represent real numbers. 77 * 78 * Attached to this tensor are two numbers that can be used to convert the 79 * 8 bit integer to the real value and vice versa. These two numbers are: 80 * - scale: a 32 bit floating point value greater than zero. 81 * - zeroPoint: a 32 bit integer, in range [0, 255]. 82 * 83 * The formula is: 84 * real_value = (integer_value - zeroPoint) * scale. 85 */ 86 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM = 5, 87 #if __ANDROID_API__ >= __ANDROID_API_Q__ 88 /** 89 * An 8 bit boolean scalar value. 90 * 91 * Values of this operand type are either true or false. A zero value 92 * represents false; any other value represents true. 93 * 94 * Available since API level 29. 95 */ 96 ANEURALNETWORKS_BOOL = 6, 97 /** 98 * A tensor of 16 bit signed integers that represent real numbers. 99 * 100 * Attached to this tensor is a number representing real value scale that is 101 * used to convert the 16 bit number to a real value in the following way: 102 * realValue = integerValue * scale. 103 * 104 * scale is a 32 bit floating point with value greater than zero. 105 * 106 * Available since API level 29. 107 */ 108 ANEURALNETWORKS_TENSOR_QUANT16_SYMM = 7, 109 /** 110 * A tensor of IEEE 754 16 bit floating point values. 111 * 112 * Available since API level 29. 113 */ 114 ANEURALNETWORKS_TENSOR_FLOAT16 = 8, 115 /** 116 * A tensor of 8 bit boolean values. 117 * 118 * Values of this operand type are either true or false. A zero value 119 * represents false; any other value represents true. 120 * 121 * Available since API level 29. 122 */ 123 ANEURALNETWORKS_TENSOR_BOOL8 = 9, 124 /** 125 * An IEEE 754 16 bit floating point scalar value. 126 * 127 * Available since API level 29. 128 */ 129 ANEURALNETWORKS_FLOAT16 = 10, 130 /** 131 * A tensor of 8 bit signed integers that represent real numbers. 132 * 133 * This tensor is associated with additional fields that can 134 * be used to convert the 8 bit signed integer to the real value and vice versa. 135 * These fields are: 136 * - channelDim: a 32 bit unsigned integer indicating channel dimension. 137 * - scales: an array of positive 32 bit floating point values. 138 * The size of the scales array must be equal to dimensions[channelDim]. 139 * 140 * {@link ANeuralNetworksModel_setOperandSymmPerChannelQuantParams} must be used 141 * to set the parameters for an Operand of this type. 142 * 143 * The channel dimension of this tensor must not be unknown (dimensions[channelDim] != 0). 144 * 145 * The formula is: 146 * realValue[..., C, ...] = 147 * integerValue[..., C, ...] * scales[C] 148 * where C is an index in the Channel dimension. 149 * 150 * Available since API level 29. 151 */ 152 ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL = 11, 153 154 /** 155 * A tensor of 16 bit unsigned integers that represent real numbers. 156 * 157 * Attached to this tensor are two numbers that can be used to convert the 158 * 16 bit integer to the real value and vice versa. These two numbers are: 159 * - scale: a 32 bit floating point value greater than zero. 160 * - zeroPoint: a 32 bit integer, in range [0, 65535]. 161 * 162 * The formula is: 163 * real_value = (integer_value - zeroPoint) * scale. 164 * 165 * Available since API level 29. 166 */ 167 ANEURALNETWORKS_TENSOR_QUANT16_ASYMM = 12, 168 169 /** 170 * A tensor of 8 bit signed integers that represent real numbers. 171 * 172 * Attached to this tensor is a number representing real value scale that is 173 * used to convert the 8 bit number to a real value in the following way: 174 * realValue = integerValue * scale. 175 * 176 * scale is a 32 bit floating point with value greater than zero. 177 * 178 * Available since API level 29. 179 */ 180 ANEURALNETWORKS_TENSOR_QUANT8_SYMM = 13, 181 #endif // __ANDROID_API__ >= __ANDROID_API_Q__ 182 183 } OperandCode; 184 185 /** 186 * Operation types. 187 * 188 * The type of operations that can be added to a model. 189 * 190 * Available since API level 27. 191 */ 192 typedef enum { 193 // Operations below are available since API level 27. 194 195 /** 196 * Adds two tensors, element-wise. 197 * 198 * Takes two input tensors of identical {@link OperandCode} and compatible 199 * dimensions. The output is the sum of both input tensors, optionally 200 * modified by an activation function. 201 * 202 * Two dimensions are compatible when: 203 * 1. they are equal, or 204 * 2. one of them is 1 205 * 206 * The size of the output is the maximum size along each dimension of the 207 * input operands. It starts with the trailing dimensions, and works its 208 * way forward. 209 * 210 * Example: 211 * 212 * input1.dimension = {4, 1, 2} 213 * input2.dimension = {5, 4, 3, 1} 214 * output.dimension = {5, 4, 3, 2} 215 * 216 * Since API level 29, generic zero-sized input tensor is supported. Zero 217 * dimension is only compatible with 0 or 1. The size of the output 218 * dimension is zero if either of corresponding input dimension is zero. 219 * 220 * Supported tensor {@link OperandCode}: 221 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 222 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 223 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 224 * 225 * Supported tensor rank: up to 4 226 * 227 * Inputs: 228 * * 0: A tensor. 229 * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions 230 * as input0. 231 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 232 * the scales and zeroPoint can be different from input0 scale and zeroPoint. 233 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 234 * {@link FuseCode} values. Specifies the activation to 235 * invoke on the result. 236 * 237 * Outputs: 238 * * 0: The sum, a tensor of the same {@link OperandCode} as input0. 239 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 240 * the scale and zeroPoint can be different from inputs' scale and zeroPoint. 241 * 242 * Available since API level 27. 243 */ 244 ANEURALNETWORKS_ADD = 0, 245 246 /** 247 * Performs a 2-D average pooling operation. 248 * 249 * The output dimensions are functions of the filter dimensions, stride, and 250 * padding. 251 * 252 * The values in the output tensor are computed as: 253 * 254 * output[b, i, j, channel] = 255 * sum_{di, dj}( 256 * input[b, strides[1] * i + di, strides[2] * j + dj, channel] 257 * ) / sum(1) 258 * 259 * Supported tensor {@link OperandCode}: 260 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 261 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 262 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 263 * 264 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 265 * With the default data layout NHWC, the data is stored in the order of: 266 * [batch, height, width, channels]. Alternatively, the data layout could 267 * be NCHW, the data storage order of: [batch, channels, height, width]. 268 * 269 * Both explicit padding and implicit padding are supported. 270 * 271 * Inputs (explicit padding): 272 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying 273 * the input. Since API level 29, zero batches is supported for this 274 * tensor. 275 * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 276 * the left, in the ‘width’ dimension. 277 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 278 * the right, in the ‘width’ dimension. 279 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 280 * the top, in the ‘height’ dimension. 281 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 282 * the bottom, in the ‘height’ dimension. 283 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 284 * walking through input in the ‘width’ dimension. 285 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 286 * walking through input in the ‘height’ dimension. 287 * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter 288 * width. 289 * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter 290 * height. 291 * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 292 * {@link FuseCode} values. Specifies the activation to 293 * invoke on the result. 294 * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 295 * Set to true to specify NCHW data layout for input0 and output0. 296 * Available since API level 29. 297 * 298 * Inputs (implicit padding): 299 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying 300 * the input. Since API level 29, zero batches is supported for this 301 * tensor. 302 * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit 303 * padding scheme, has to be one of the 304 * {@link PaddingCode} values. 305 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 306 * walking through input in the ‘width’ dimension. 307 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 308 * walking through input in the ‘height’ dimension. 309 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter 310 * width. 311 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter 312 * height. 313 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 314 * {@link FuseCode} values. Specifies the activation to 315 * invoke on the result. 316 * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 317 * Set to true to specify NCHW data layout for input0 and output0. 318 * Available since API level 29. 319 * 320 * Outputs: 321 * * 0: The output 4-D tensor, of shape 322 * [batches, out_height, out_width, depth]. 323 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 324 * the scale and zeroPoint must be the same as input0. 325 * 326 * Available since API level 27. 327 */ 328 ANEURALNETWORKS_AVERAGE_POOL_2D = 1, 329 330 /** 331 * Concatenates the input tensors along the given dimension. 332 * 333 * The input tensors must have identical {@link OperandCode} and the same 334 * dimensions except the dimension along the concatenation axis. 335 * 336 * Supported tensor {@link OperandCode}: 337 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 338 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 339 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (full support since API 340 * level 29, see the input section) 341 * 342 * Supported tensor rank: up to 4 343 * 344 * Inputs: 345 * * 0 ~ n-1: The list of n input tensors, of shape 346 * [D0, D1, ..., Daxis(i), ..., Dm]. 347 * Before API level 29, all input tensors of 348 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 349 * must have the same scale and zeroPoint as the output tensor. 350 * Since API level 29, zero-sized tensors are supported. 351 * * n: An {@link ANEURALNETWORKS_INT32} scalar, specifying the 352 * concatenation axis. 353 * 354 * Outputs: 355 * * 0: The output, a tensor of the same {@link OperandCode} as the input 356 * tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm]. 357 * Since API level 29, for a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 358 * the scale and zeroPoint values can be different from 359 * input tensors. Before API level 29 they have to be the same as for the input tensors. 360 * 361 * Available since API level 27. 362 */ 363 ANEURALNETWORKS_CONCATENATION = 2, 364 365 /** 366 * Performs an 2-D convolution operation. 367 * 368 * The CONV_2D op sweeps a 2-D filter that can mix channels together over a 369 * batch of images, applying the filter to each window of each image of the 370 * appropriate size. 371 * 372 * The output dimensions are functions of the filter dimensions, stride, and 373 * padding. 374 * 375 * The values in the output tensor are computed as: 376 * 377 * output[b, i, j, channel] = 378 * sum_{di, dj, k} ( 379 * input[b, strides[1] * i + di, strides[2] * j + dj, k] * 380 * filter[channel, di, dj, k] 381 * ) + bias[channel] 382 * 383 * Supported tensor {@link OperandCode} configurations: 384 * * 32 bit floating point: 385 * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias. 386 * 387 * * Quantized: 388 * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output. 389 * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to 390 * * * input.scale * filter.scale). 391 * 392 * Available since API level 29: 393 * * 16 bit floating point: 394 * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias. 395 * 396 * * Quantized with symmetric per channel quantization for the filter: 397 * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output. 398 * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. 399 * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, 400 * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). 401 * 402 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 403 * With the default data layout NHWC, the data is stored in the order of: 404 * [batch, height, width, channels]. Alternatively, the data layout could 405 * be NCHW, the data storage order of: [batch, channels, height, width]. 406 * 407 * Both explicit padding and implicit padding are supported. 408 * 409 * Inputs (explicit padding): 410 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], 411 * specifying the input. Since API level 29, zero batches is supported 412 * for this tensor. 413 * * 1: A 4-D tensor, of shape 414 * [depth_out, filter_height, filter_width, depth_in], specifying the 415 * filter. For tensor of type 416 * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel 417 * dimension (extraParams.channelQuant.channelDim) must be set to 0. 418 * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input 419 * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or 420 * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same 421 * type. For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, 422 * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint 423 * of 0 and bias_scale == input_scale * filter_scale. For filter tensor 424 * of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias 425 * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 426 * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to 427 * bias_scale[i] = input_scale * filter_scale[i]. 428 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 429 * the left, in the ‘width’ dimension. 430 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 431 * the right, in the ‘width’ dimension. 432 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 433 * the top, in the ‘height’ dimension. 434 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 435 * the bottom, in the ‘height’ dimension. 436 * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 437 * walking through input in the ‘width’ dimension. 438 * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 439 * walking through input in the ‘height’ dimension. 440 * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 441 * {@link FuseCode} values. Specifies the activation to 442 * invoke on the result. 443 * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 444 * Set to true to specify NCHW data layout for input0 and output0. 445 * Available since API level 29. 446 * * 11: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation 447 * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped 448 * cells between each filter element on width dimension. If this input is set, 449 * input 12 (dilation factor for height) must be specified as well. 450 * Available since API level 29. 451 * * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation 452 * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped 453 * cells between each filter element on height dimension. If this input is set, 454 * input 11 (dilation factor for width) must be specified as well. 455 * Available since API level 29. 456 * 457 * Inputs (implicit padding): 458 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], 459 * specifying the input. Since API level 29, zero batches is supported 460 * for this tensor. 461 * * 1: A 4-D tensor, of shape 462 * [depth_out, filter_height, filter_width, depth_in], specifying the 463 * filter. For tensor of type 464 * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel 465 * dimension (extraParams.channelQuant.channelDim) must be set to 0. 466 * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input 467 * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or 468 * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same 469 * type. For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, 470 * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint 471 * of 0 and bias_scale == input_scale * filter_scale. For filter tensor 472 * of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias 473 * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 474 * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to 475 * bias_scale[i] = input_scale * filter_scale[i]. 476 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit 477 * padding scheme, has to be one of the 478 * {@link PaddingCode} values. 479 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 480 * walking through input in the ‘width’ dimension. 481 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 482 * walking through input in the ‘height’ dimension. 483 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 484 * {@link FuseCode} values. Specifies the activation to 485 * invoke on the result. 486 * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 487 * Set to true to specify NCHW data layout for input0 and output0. 488 * Available since API level 29. 489 * * 8: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation 490 * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped 491 * cells between each filter element on width dimension. If this input is set, 492 * input 9 (dilation factor for height) must be specified as well. 493 * Available since API level 29. 494 * * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation 495 * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped 496 * cells between each filter element on height dimension. If this input is set, 497 * input 8 (dilation factor for width) must be specified as well. 498 * Available since API level 29. 499 * 500 * Outputs: 501 * * 0: The output 4-D tensor, of shape 502 * [batches, out_height, out_width, depth_out]. Before API level 29, 503 * for output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, 504 * the following condition must be satisfied: 505 * output_scale > input_scale * filter_scale 506 * 507 * Available since API level 27. 508 */ 509 ANEURALNETWORKS_CONV_2D = 3, 510 511 /** 512 * Performs a depthwise 2-D convolution operation. 513 * 514 * Given an input tensor of shape [batches, height, width, depth_in] and a 515 * filter tensor of shape [1, filter_height, filter_width, depth_out] 516 * containing depth_out convolutional filters of depth 1, DEPTHWISE_CONV 517 * applies a different filter to each input channel (expanding from 1 518 * channel to channel_multiplier channels for each), then concatenates the 519 * results together. 520 * 521 * The output has depth_out = depth_in * depth_multiplier channels. 522 * The output dimensions are functions of the filter dimensions, stride, and 523 * padding. 524 * 525 * The values in the output tensor are computed as: 526 * 527 * output[b, i, j, k * channel_multiplier + q] = 528 * sum_{di, dj} ( 529 * input[b, strides[1] * i + di, strides[2] * j + dj, k] * 530 * filter[1, di, dj, k * channel_multiplier + q] 531 * ) + bias[k * channel_multiplier + q] 532 * 533 * Supported tensor {@link OperandCode} configurations: 534 * * 32 bit floating point: 535 * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias. 536 * 537 * * Quantized: 538 * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output. 539 * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to 540 * * * input.scale * filter.scale). 541 * 542 * Available since API level 29: 543 * * 16 bit floating point: 544 * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias. 545 * 546 * * Quantized with symmetric per channel quantization for the filter: 547 * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output. 548 * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. 549 * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, 550 * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). 551 * 552 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 553 * With the default data layout NHWC, the data is stored in the order of: 554 * [batch, height, width, channels]. Alternatively, the data layout could 555 * be NCHW, the data storage order of: [batch, channels, height, width]. 556 * 557 * Both explicit padding and implicit padding are supported. 558 * 559 * Inputs (explicit padding): 560 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], 561 * specifying the input. 562 * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out], 563 * specifying the filter. For tensor of type 564 * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel 565 * dimension (extraParams.channelQuant.channelDim) must be set to 3. 566 * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input 567 * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or 568 * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same 569 * type. For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, 570 * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint 571 * of 0 and bias_scale == input_scale * filter_scale. For filter tensor 572 * of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias 573 * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 574 * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to 575 * bias_scale[i] = input_scale * filter_scale[i]. 576 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 577 * the left, in the ‘width’ dimension. 578 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 579 * the right, in the ‘width’ dimension. 580 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 581 * the top, in the ‘height’ dimension. 582 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 583 * the bottom, in the ‘height’ dimension. 584 * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 585 * walking through input in the ‘width’ dimension. 586 * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 587 * walking through input in the ‘height’ dimension. 588 * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise 589 * multiplier. 590 * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 591 * {@link FuseCode} values. Specifies the activation to 592 * invoke on the result. 593 * * 11: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 594 * Set to true to specify NCHW data layout for input0 and output0. 595 * Available since API level 29. 596 * * 12: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation 597 * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped 598 * cells between each filter element on width dimension. If this input is set, 599 * input 13 (dilation factor for height) must be specified as well. 600 * Available since API level 29. 601 * * 13: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation 602 * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped 603 * cells between each filter element on height dimension. If this input is set, 604 * input 12 (dilation factor for width) must be specified as well. 605 * Available since API level 29. 606 * 607 * Inputs (implicit padding): 608 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], 609 * specifying the input. 610 * * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out], 611 * specifying the filter. 612 * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input 613 * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or 614 * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same 615 * type. For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, 616 * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint 617 * of 0 and bias_scale == input_scale * filter_scale. For filter tensor 618 * of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias 619 * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 620 * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to 621 * bias_scale[i] = input_scale * filter_scale[i]. 622 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit 623 * padding scheme, has to be one of the 624 * {@link PaddingCode} values. 625 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 626 * walking through input in the ‘width’ dimension. 627 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 628 * walking through input in the ‘height’ dimension. 629 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the depthwise 630 * multiplier. 631 * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 632 * {@link FuseCode} values. Specifies the activation to 633 * invoke on the result. 634 * * 8: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 635 * Set to true to specify NCHW data layout for input0 and output0. 636 * Available since API level 29. 637 * * 9: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation 638 * factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped 639 * cells between each filter element on width dimension. If this input is set, 640 * input 10 (dilation factor for height) must be specified as well. 641 * Available since API level 29. 642 * * 10: An optional {@link ANEURALNETWORKS_INT32} scalar, specifying the dilation 643 * factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped 644 * cells between each filter element on height dimension. If this input is set, 645 * input 9 (dilation factor for width) must be specified as well. 646 * Available since API level 29. 647 648 * 649 * Outputs: 650 * * 0: The output 4-D tensor, of shape 651 * [batches, out_height, out_width, depth_out]. Before API level 29, 652 * for output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, 653 * the following condition must be satisfied: 654 * output_scale > input_scale * filter_scale 655 * 656 * Available since API level 27. 657 */ 658 ANEURALNETWORKS_DEPTHWISE_CONV_2D = 4, 659 660 /** 661 * Rearranges data from depth into blocks of spatial data. 662 * 663 * More specifically, this op outputs a copy of the input tensor where 664 * values from the depth dimension are moved in spatial blocks to the height 665 * and width dimensions. The value block_size indicates the input block size 666 * and how the data is moved. 667 * 668 * Chunks of data of size block_size * block_size from depth are rearranged 669 * into non-overlapping blocks of size block_size x block_size. 670 * 671 * The width of the output tensor is input_depth * block_size, whereas the 672 * height is input_height * block_size. The depth of the input tensor must 673 * be divisible by block_size * block_size 674 * 675 * Supported tensor {@link OperandCode}: 676 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 677 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 678 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 679 * 680 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 681 * With the default data layout NHWC, the data is stored in the order of: 682 * [batch, height, width, channels]. Alternatively, the data layout could 683 * be NCHW, the data storage order of: [batch, channels, height, width]. 684 * 685 * Inputs: 686 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], 687 * specifying the input. 688 * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size. 689 * block_size must be >=1 and block_size * block_size must be a divisor 690 * of the input depth. 691 * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 692 * Set to true to specify NCHW data layout for input0 and output0. 693 * Available since API level 29. 694 * 695 * Outputs: 696 * * 0: The output 4-D tensor, of shape [batch, height*block_size, 697 * width*block_size, depth/(block_size*block_size)]. 698 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 699 * the scale and zeroPoint must be the same as input0. 700 * 701 * Available since API level 27. 702 */ 703 ANEURALNETWORKS_DEPTH_TO_SPACE = 5, 704 705 /** 706 * Dequantizes the input tensor. 707 * 708 * The formula is: 709 * 710 * output = (input - zeroPoint) * scale. 711 * 712 * Supported input tensor {@link OperandCode}: 713 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 714 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM} (since API level 29) 715 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} (since API level 29) 716 * 717 * Supported output tensor {@link OperandCode}: 718 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 719 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}. 720 * 721 * Supported tensor rank: up to 4 722 * 723 * Inputs: 724 * * 0: A tensor. Since API level 29, this tensor may be zero-sized. 725 * 726 * Outputs: 727 * * 0: A tensor with the same shape as input0. 728 * 729 * Available since API level 27. 730 */ 731 ANEURALNETWORKS_DEQUANTIZE = 6, 732 733 /** 734 * Looks up sub-tensors in the input tensor. 735 * 736 * This operator takes for input a tensor of values (Values) and 737 * a one-dimensional tensor of selection indices (Lookups). 738 * The output tensor is the concatenation of sub-tensors of Values as 739 * selected by Lookups. 740 * 741 * Think of Values as being sliced along its first dimension: 742 * The entries in Lookups select which slices are concatenated together 743 * to create the output tensor. 744 * 745 * For example, if Values has shape of [40, 200, 300] and 746 * Lookups has shape of [3], all three values found in Lookups are 747 * expected to be between 0 and 39. The resulting tensor must 748 * have shape of [3, 200, 300]. 749 * 750 * If a value in Lookups is out of bounds, the operation must fail 751 * and an error must be reported. 752 * 753 * Supported value tensor {@link OperandCode}: 754 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 755 * * {@link ANEURALNETWORKS_TENSOR_INT32} 756 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 757 * 758 * Supported value tensor rank: from 2 759 * 760 * Inputs: 761 * * 0: Lookups. A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. 762 * The values are indices into the first dimension of Values. 763 * * 1: Values. An n-D tensor, where n >= 2, from which sub-tensors are 764 * extracted. 765 * 766 * Output: 767 * * 0: A n-D tensor with the same rank and shape as the Values 768 * tensor, except for the first dimension which has the same size 769 * as Lookups' only dimension. 770 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 771 * the scale and zeroPoint must be the same as input1. 772 * 773 * Available since API level 27. 774 */ 775 ANEURALNETWORKS_EMBEDDING_LOOKUP = 7, 776 777 /** 778 * Computes element-wise floor() on the input tensor. 779 * 780 * Supported tensor {@link OperandCode}: 781 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 782 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 783 * 784 * Supported tensor rank: up to 4 785 * 786 * Inputs: 787 * * 0: A tensor. 788 * 789 * Outputs: 790 * * 0: The output tensor, of the same {@link OperandCode} and dimensions as 791 * the input tensor. 792 * 793 * Available since API level 27. 794 */ 795 ANEURALNETWORKS_FLOOR = 8, 796 797 /** 798 * Denotes a fully (densely) connected layer, which connects all elements 799 * in the input tensor with each element in the output tensor. 800 * 801 * This layer implements the operation: 802 * 803 * outputs = activation(inputs * weights’ + bias) 804 * 805 * Supported tensor {@link OperandCode}: 806 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 807 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 808 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 809 * 810 * Supported tensor rank: up to 4. 811 * 812 * Inputs: 813 * * 0: A tensor of at least rank 2, specifying the input. If rank is 814 * greater than 2, then it gets flattened to a 2-D Tensor. The 815 * (flattened) 2-D Tensor is reshaped (if necessary) to 816 * [batch_size, input_size], where "input_size" corresponds to the 817 * number of inputs to the layer, matching the second dimension of 818 * weights, and "batch_size" is calculated by dividing the number of 819 * elements by "input_size". Since API level 29, zero batch_size is 820 * supported for this tensor. 821 * * 1: A 2-D tensor, specifying the weights, of shape 822 * [num_units, input_size], where "num_units" corresponds to the number 823 * of output nodes. 824 * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input 825 * tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias should 826 * also be of {@link ANEURALNETWORKS_TENSOR_FLOAT32}. For input tensor 827 * of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias should be 828 * of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and 829 * bias_scale == input_scale * filter_scale. 830 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 831 * {@link FuseCode} values. Specifies the activation to 832 * invoke on the result. 833 * 834 * Outputs: 835 * * 0: The output tensor, of shape [batch_size, num_units]. Before API 836 * level 29, for output tensor of {@link 837 * ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the following condition must 838 * be satisfied: output_scale > input_scale * filter_scale. 839 * 840 * Available since API level 27. 841 */ 842 ANEURALNETWORKS_FULLY_CONNECTED = 9, 843 844 /** 845 * Looks up sub-tensors in the input tensor using a key-value map. 846 * 847 * This operator takes for input a tensor of values (Values), 848 * a one-dimensional tensor of selection values (Lookups) and 849 * a one-dimensional tensor that maps these values to Values 850 * indexes. The output tensor is the concatenation of sub-tensors of 851 * Values as selected by Lookups via Keys. 852 * 853 * Think of Values as being sliced along its outer-most dimension. 854 * The output is a concatenation of selected slices, with one slice 855 * for each entry of Lookups. The slice selected is the one at the 856 * same index as the Maps entry that matches the value in Lookups. 857 * 858 * For a hit, the corresponding sub-tensor of Values is included 859 * in the Output tensor. For a miss, the corresponding sub-tensor in 860 * Output must have zero values. 861 * 862 * For example, if Values has shape of [40, 200, 300], 863 * Keys should have a shape of [40]. If Lookups tensor has shape 864 * of [3], three slices are being concatenated, so the resulting tensor 865 * must have the shape of [3, 200, 300]. If the first entry in Lookups 866 * has the value 123456, that value must be located in Keys tensor. 867 * If the sixth entry of Keys contains 123456, the sixth slice of Values 868 * must be selected. If no entry in Keys has 123456, a slice of zeroes 869 * must be concatenated. 870 * 871 * Supported value tensor {@link OperandCode}: 872 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 873 * * {@link ANEURALNETWORKS_TENSOR_INT32} 874 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 875 * 876 * Supported value tensor rank: from 2 877 * 878 * Inputs: 879 * * 0: Lookups. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with 880 * shape [ k ]. 881 * * 1: Keys. A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape 882 * [ n ]; Keys and Values pair represent a map, i.e., the ith element 883 * in Keys (Keys[i]) is the key to select the ith sub-tensor in Values 884 * (Values[i]), where 0 <= i <= n-1. Keys tensor *MUST* be sorted in 885 * ascending order. 886 * * 2: Values. A tensor with shape of [ n, … ]; i.e., the first dimension 887 * must be n. 888 * 889 * Outputs: 890 * * 0: Output. A tensor with shape [ k …]. 891 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 892 * the scale and zeroPoint must be the same as input2. 893 * * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup 894 * hits (True) or not (False). 895 * Stored as {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} with offset 0 896 * and scale 1.0f. 897 * A non-zero byte represents True, a hit. A zero indicates otherwise. 898 * 899 * Available since API level 27. 900 */ 901 ANEURALNETWORKS_HASHTABLE_LOOKUP = 10, 902 903 /** 904 * Applies L2 normalization along the depth dimension. 905 * 906 * The values in the output tensor are computed as: 907 * 908 * output[batch, row, col, channel] = 909 * input[batch, row, col, channel] / 910 * sqrt(sum_{c} pow(input[batch, row, col, c], 2)) 911 * 912 * For input tensor with rank less than 4, independently normalizes each 913 * 1-D slice along dimension dim. 914 * 915 * Supported tensor {@link OperandCode}: 916 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 917 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 918 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29) 919 * 920 * Supported tensor rank: up to 4 921 * Tensors with rank less than 4 are only supported since API level 29. 922 * 923 * Inputs: 924 * * 0: An n-D tensor, specifying the tensor to be normalized. 925 * * 1: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1, 926 * specifying the dimension normalization would be performed on. 927 * Negative index is used to specify axis from the end (e.g. -1 for 928 * the last axis). Must be in the range [-n, n). 929 * Available since API level 29. 930 * 931 * Outputs: 932 * * 0: A tensor of the same {@link OperandCode} and same shape as input0. 933 * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, 934 * the scale must be 1.f / 128 and the zeroPoint must be 128. 935 * 936 * Available since API level 27. 937 */ 938 ANEURALNETWORKS_L2_NORMALIZATION = 11, 939 940 /** 941 * Performs an 2-D L2 pooling operation. 942 * 943 * The output dimensions are functions of the filter dimensions, stride, and 944 * padding. 945 * 946 * The values in the output tensor are computed as: 947 * 948 * output[b, i, j, c] = 949 * sqrt(sum_{di, dj} pow(input[b, strides[1] * i + di, strides[2] * j + dj, c], 2) / 950 * sum(1)) 951 * 952 * Supported tensor {@link OperandCode}: 953 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 954 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 955 * 956 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 957 * With the default data layout NHWC, the data is stored in the order of: 958 * [batch, height, width, channels]. Alternatively, the data layout could 959 * be NCHW, the data storage order of: [batch, channels, height, width]. 960 * 961 * Both explicit padding and implicit padding are supported. 962 * 963 * Inputs (explicit padding): 964 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying 965 * the input. Since API level 29, zero batches is supported for this 966 * tensor. 967 * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 968 * the left, in the ‘width’ dimension. 969 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 970 * the right, in the ‘width’ dimension. 971 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 972 * the top, in the ‘height’ dimension. 973 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 974 * the bottom, in the ‘height’ dimension. 975 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 976 * walking through input in the ‘width’ dimension. 977 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 978 * walking through input in the ‘height’ dimension. 979 * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter 980 * width. 981 * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter 982 * height. 983 * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 984 * {@link FuseCode} values. Specifies the activation to 985 * invoke on the result. 986 * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 987 * Set to true to specify NCHW data layout for input0 and output0. 988 * Available since API level 29. 989 * 990 * Inputs (implicit padding): 991 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying 992 * the input. Since API level 29, zero batches is supported for this 993 * tensor. 994 * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit 995 * padding scheme, has to be one of the 996 * {@link PaddingCode} values. 997 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 998 * walking through input in the ‘width’ dimension. 999 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 1000 * walking through input in the ‘height’ dimension. 1001 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter 1002 * width. 1003 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter 1004 * height. 1005 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 1006 * {@link FuseCode} values. Specifies the activation to 1007 * invoke on the result. 1008 * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 1009 * Set to true to specify NCHW data layout for input0 and output0. 1010 * Available since API level 29. 1011 * 1012 * Outputs: 1013 * * 0: The output 4-D tensor, of shape 1014 * [batches, out_height, out_width, depth]. 1015 * 1016 * Available since API level 27. 1017 */ 1018 ANEURALNETWORKS_L2_POOL_2D = 12, 1019 1020 /** 1021 * Applies Local Response Normalization along the depth dimension. 1022 * 1023 * The 4-D input tensor is treated as a 3-D array of 1-D vectors (along the 1024 * last dimension), and each vector is normalized independently. Within a 1025 * given vector, each component is divided by the weighted, squared sum of 1026 * inputs within depth_radius. 1027 * 1028 * The output is calculated using this formula: 1029 * 1030 * sqr_sum[a, b, c, d] = sum( 1031 * pow(input[a, b, c, d - depth_radius : d + depth_radius + 1], 2)) 1032 * output = input / pow((bias + alpha * sqr_sum), beta) 1033 * 1034 * For input tensor with rank less than 4, independently normalizes each 1035 * 1-D slice along specified dimension. 1036 * 1037 * Supported tensor {@link OperandCode}: 1038 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1039 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1040 * 1041 * Supported tensor rank: up to 4 1042 * Tensors with rank less than 4 are only supported since API level 29. 1043 * 1044 * Inputs: 1045 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying 1046 * the input. 1047 * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the radius of 1048 * the normalization window. 1049 * * 2: A scalar, specifying the bias, must not be zero. 1050 * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias 1051 * value must be of {@link ANEURALNETWORKS_FLOAT16}. 1052 * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the bias 1053 * value must be of {@link ANEURALNETWORKS_FLOAT32}. 1054 * * 3: A scalar, specifying the scale factor, alpha. 1055 * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the 1056 * alpha value must be of {@link ANEURALNETWORKS_FLOAT16}. 1057 * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the 1058 * alpha value must be of {@link ANEURALNETWORKS_FLOAT32}. 1059 * * 4: A scalar, specifying the exponent, beta. 1060 * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta 1061 * value must be of {@link ANEURALNETWORKS_FLOAT16}. 1062 * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta 1063 * value must be of {@link ANEURALNETWORKS_FLOAT32}. 1064 * * 5: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1, 1065 * specifying the dimension normalization would be performed on. 1066 * Negative index is used to specify axis from the end (e.g. -1 for 1067 * the last axis). Must be in the range [-n, n). 1068 * Available since API level 29. 1069 * 1070 * Outputs: 1071 * * 0: The output tensor of same shape as input0. 1072 * 1073 * Available since API level 27. 1074 */ 1075 ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION = 13, 1076 1077 /** 1078 * Computes sigmoid activation on the input tensor element-wise. 1079 * 1080 * The output is calculated using this formula: 1081 * 1082 * output = 1 / (1 + exp(-input)) 1083 * 1084 * Supported tensor {@link OperandCode}: 1085 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1086 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1087 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 1088 * 1089 * Supported tensor rank: up to 4. 1090 * 1091 * Inputs: 1092 * * 0: A tensor, specifying the input. Since API level 29, this tensor may 1093 * be zero-sized. 1094 * 1095 * Outputs: 1096 * * 0: The output tensor of same shape as input0. 1097 * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, 1098 * the scale must be 1.f / 256 and the zeroPoint must be 0. 1099 * 1100 * Available since API level 27. 1101 */ 1102 ANEURALNETWORKS_LOGISTIC = 14, 1103 1104 /** 1105 * Projects an input to a bit vector via locality senstive hashing. 1106 * 1107 * Supported input tensor {@link OperandCode}: 1108 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1109 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1110 * * {@link ANEURALNETWORKS_TENSOR_INT32} 1111 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 1112 * 1113 * Supported input tensor rank: from 1 1114 * 1115 * Inputs: 1116 * * 0: Hash functions. Dim.size == 2, DataType: Float. 1117 * Tensor[0].Dim[0]: Number of hash functions. 1118 * Tensor[0].Dim[1]: Number of projected output bits generated by each 1119 * hash function. 1120 * If the projection type is Sparse: 1121 * Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32 1122 * 1123 * * 1: Input. Dim.size >= 1, no restriction on DataType. 1124 * * 2: Weight. Optional. Dim.size == 1, DataType: Float. 1125 * If not set, each input element is considered to have the same weight 1126 * of 1.0. 1127 * Tensor[1].Dim[0] == Tensor[2].Dim[0] 1128 * * 3: Type: 1129 * Sparse: 1130 * Value LSHProjectionType_SPARSE(=3) (since API level 29). 1131 * Computed bit vector is considered to be sparse. 1132 * Each output element is an int32 made up of multiple bits 1133 * computed from hash functions. 1134 * 1135 * NOTE: To avoid collisions across hash functions, an offset value 1136 * of k * (1 << Tensor[0].Dim[1]) will be added to each signature, 1137 * where k is the index of the hash function. 1138 * 1139 * Value LSHProjectionType_SPARSE_DEPRECATED(=1). 1140 * Legacy behavior that does not include the offset value. 1141 * 1142 * Dense: 1143 * Value LSHProjectionType_DENSE(=2). 1144 * Computed bit vector is considered to be dense. Each output 1145 * element represents a bit and can take the value of either 1146 * 0 or 1. 1147 * 1148 * Outputs: 1149 * * 0: If the projection type is Sparse: 1150 * Output.Dim == { Tensor[0].Dim[0] } 1151 * A tensor of int32 that represents hash signatures, 1152 * 1153 * If the projection type is Dense: 1154 * Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] } 1155 * A flattened tensor that represents projected bit vectors. 1156 * 1157 * Available since API level 27. 1158 * The offset value for sparse projections was added in API level 29. 1159 */ 1160 ANEURALNETWORKS_LSH_PROJECTION = 15, 1161 1162 /** 1163 * Performs a single time step in a Long Short-Term Memory (LSTM) layer 1164 * 1165 * The LSTM operation is described by the following equations. 1166 * 1167 * \f{eqnarray*}{ 1168 * i_t =& \sigma(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}C_{t-1}+b_i) & \\ 1169 * f_t =& \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}C_{t-1}+b_f) & \\ 1170 * C_t =& clip(f_t \odot C_{t-1} + i_t \odot 1171 * g(W_{xc}x_t+W_{hc}h_{t-1}+b_c),\ t_{cell}) & \\ 1172 * o_t =& \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}C_t+b_o) & \\ 1173 * & & \\ 1174 * & clip(W_{proj}(o_t \odot g(C_t))+b_{proj},\ t_{proj}) 1175 * & if\ there\ is\ a\ projection; \\ 1176 * h_t =& & \\ 1177 * & o_t \odot g(C_t) & otherwise. \\ 1178 * \f} 1179 * Where: 1180 * * \f$x_t\f$ is the input, 1181 * * \f$i_t\f$ is the input gate, 1182 * * \f$f_t\f$ is the forget gate, 1183 * * \f$C_t\f$ is the cell state, 1184 * * \f$o_t\f$ is the output, 1185 * * \f$h_t\f$ is the output state, 1186 * * \f$\sigma\f$ is the logistic sigmoid function, 1187 * * \f$g\f$ is the cell input and cell output activation function, usually 1188 * \f$tahn\f$, 1189 * * \f$W_{xi}\f$ is the input-to-input weight matrix, 1190 * * \f$W_{hi}\f$ is the recurrent to input weight matrix, 1191 * * \f$W_{ci}\f$ is the cell-to-input weight matrix, 1192 * * \f$b_i\f$ is the input gate bias, 1193 * * \f$W_{xf}\f$ is the input-to-forget weight matrix, 1194 * * \f$W_{hf}\f$ is the recurrent-to-forget weight matrix, 1195 * * \f$W_{cf}\f$ is the cell-to-forget weight matrix, 1196 * * \f$b_f\f$ is the forget gate bias, 1197 * * \f$W_{xc}\f$ is the input-to-cell weight matrix, 1198 * * \f$W_{hc}\f$ is the recurrent-to-cell weight matrix, 1199 * * \f$b_c\f$ is the cell bias, 1200 * * \f$W_{xo}\f$ is the input-to-output weight matrix, 1201 * * \f$W_{ho}\f$ is the recurrent-to-output weight matrix, 1202 * * \f$W_{co}\f$ is the cell-to-output weight matrix, 1203 * * \f$b_o\f$ is the output gate bias, 1204 * * \f$W_{proj}\f$ is the projection weight matrix, 1205 * * \f$b_{proj}\f$ is the projection bias, 1206 * * \f$t_{cell}\f$ is the threshold for clipping the cell state, and 1207 * * \f$t_{proj}\f$ is the threshold for clipping the projected output. 1208 * * \f$\odot\f$ is the 1209 * <a href="https://en.wikipedia.org/wiki/Hadamard_product_(matrices)"> 1210 * Hadamard product</a> that takes two matrices and produces another 1211 * matrix, each element of which is the product of the corresponding 1212 * elements of the input matrices. 1213 * 1214 * Since API level 29 LSTM supports layer normalization. 1215 * In case layer normalization is used, the inputs to internal activation 1216 * functions (sigmoid and \f$g\f$) are normalized, rescaled and recentered 1217 * following an approach from section 3.1 from 1218 * https://arxiv.org/pdf/1607.06450.pdf 1219 * 1220 * The operation has the following independently optional inputs: 1221 * * The cell-to-input weights (\f$W_{ci}\f$), cell-to-forget weights 1222 * (\f$W_{cf}\f$) and cell-to-output weights (\f$W_{co}\f$) either all 1223 * have values or neither of them have values (i.e., all set to null). If 1224 * they have values, the peephole optimization is used. 1225 * * The input-to-input weights (\f$W_{xi}\f$), recurrent-to-input weights 1226 * (\f$W_{hi}\f$) and input gate bias (\f$b_i\f$) either all have values, 1227 * or none of them have values. If they have no values, coupling of input 1228 * and forget gates (CIFG) is used, in which case the input gate 1229 * (\f$i_t\f$) is calculated using the following equation instead. 1230 * \f{eqnarray*}{ 1231 * i_t = 1 - f_t 1232 * \f} 1233 * In case peephole optimization is used and CIFG is not used 1234 * cell-to-input (\f$W_{ci}\f$) weights must be present. Otherwise, the 1235 * cell-to-input weights must have no value. 1236 * * The projection weights (\f$W_{proj}\f$) is required only for the 1237 * recurrent projection layer, and should otherwise have no value. 1238 * * The projection bias (\f$b_{proj}\f$) may (but not required to) have a 1239 * value if the recurrent projection layer exists, and should otherwise 1240 * have no value. 1241 * * (API level >= 29) The four layer normalization weights either all have 1242 * values or none of them have values. Additionally, if CIFG is used, 1243 * input layer normalization weights tensor is omitted and the other layer 1244 * normalization weights either all have values or none of them have 1245 * values. Layer normalization is used when the values of all the layer 1246 * normalization weights are present. 1247 * 1248 * References: 1249 * 1250 * The default non-peephole non-CIFG implementation is based on: 1251 * http://www.bioinf.jku.at/publications/older/2604.pdf 1252 * S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural 1253 * Computation, 9(8):1735-1780, 1997. 1254 * 1255 * The peephole implementation and projection layer is based on: 1256 * https://research.google.com/pubs/archive/43905.pdf 1257 * Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory 1258 * recurrent neural network architectures for large scale acoustic 1259 * modeling." INTERSPEECH, 2014. 1260 * (However, the concept of peephole optimization was introduced in work 1261 * prior to this paper.) 1262 * 1263 * The coupling of input and forget gate (CIFG) is based on: 1264 * http://arxiv.org/pdf/1503.04069.pdf 1265 * Greff et al. "LSTM: A Search Space Odyssey" 1266 * 1267 * The layer normalization is based on: 1268 * https://arxiv.org/pdf/1607.06450.pdf 1269 * Jimmy Ba et al. "Layer Normalization" 1270 * 1271 * Supported tensor {@link OperandCode}: 1272 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1273 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1274 * 1275 * All input and output tensors must be of the same type. 1276 * 1277 * Inputs: 1278 * * 0: The input (\f$x_t\f$). 1279 * A 2-D tensor of shape [batch_size, input_size], where “batch_size” 1280 * corresponds to the batching dimension, and “input_size” is the size 1281 * of the input. 1282 * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional. 1283 * A 2-D tensor of shape [num_units, input_size], where “num_units” 1284 * corresponds to the number of cell units. 1285 * * 2: The input-to-forget weights (\f$W_{xf}\f$). 1286 * A 2-D tensor of shape [num_units, input_size]. 1287 * * 3: The input-to-cell weights (\f$W_{xc}\f$). 1288 * A 2-D tensor of shape [num_units, input_size]. 1289 * * 4: The input-to-output weights (\f$W_{xo}\f$). 1290 * A 2-D tensor of shape [num_units, input_size]. 1291 * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional. 1292 * A 2-D tensor of shape [num_units, output_size], where “output_size” 1293 * corresponds to either the number of cell units (i.e., “num_units”), 1294 * or the second dimension of the “projection_weights”, if defined. 1295 * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$). 1296 * A 2-D tensor of shape [num_units, output_size]. 1297 * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$). 1298 * A 2-D tensor of shape [num_units, output_size]. 1299 * * 8: The recurrent-to-output weights (\f$W_{ho}\f$). 1300 * A 2-D tensor of shape [num_units, output_size]. 1301 * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional. 1302 * A 1-D tensor of shape [num_units]. 1303 * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional. 1304 * A 1-D tensor of shape [num_units]. 1305 * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional. 1306 * A 1-D tensor of shape [num_units]. 1307 * * 12:The input gate bias (\f$b_i\f$). Optional. 1308 * A 1-D tensor of shape [num_units]. 1309 * * 13:The forget gate bias (\f$b_f\f$). 1310 * A 1-D tensor of shape [num_units]. 1311 * * 14:The cell bias (\f$b_c\f$). 1312 * A 1-D tensor of shape [num_units]. 1313 * * 15:The output gate bias (\f$b_o\f$). 1314 * A 1-D tensor of shape [num_units]. 1315 * * 16:The projection weights (\f$W_{proj}\f$). Optional. 1316 * A 2-D tensor of shape [output_size, num_units]. 1317 * * 17:The projection bias (\f$b_{proj}\f$). Optional. 1318 * A 1-D tensor of shape [output_size]. 1319 * * 18:The output state (in) (\f$h_{t-1}\f$). 1320 * A 2-D tensor of shape [batch_size, output_size]. 1321 * * 19:The cell state (in) (\f$C_{t-1}\f$). 1322 * A 2-D tensor of shape [batch_size, num_units]. 1323 * * 20:The activation function (\f$g\f$). 1324 * A value indicating the activation function: 1325 * <ul> 1326 * <li>0: None; 1327 * <li>1: Relu; 1328 * <li>3: Relu6; 1329 * <li>4: Tanh; 1330 * <li>6: Sigmoid. 1331 * </ul> 1332 * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such 1333 * that values are bound within [-cell_clip, cell_clip]. If set to 0.0 1334 * then clipping is disabled. 1335 * Until API level 29 this scalar must be of type {@link 1336 * ANEURALNETWORKS_FLOAT32}. Since API level 29, if all the input 1337 * tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this 1338 * scalar must be of the type {@link ANEURALNETWORKS_FLOAT32}, 1339 * otherwise if all the input tensors have the type {@link 1340 * ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link 1341 * ANEURALNETWORKS_FLOAT16}. 1342 * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the 1343 * projection layer, such that values are bound within 1344 * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled. 1345 * Until API level 29 this scalar must be of type {@link 1346 * ANEURALNETWORKS_FLOAT32}. Since API level 29, if all the input 1347 * tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this 1348 * scalar must be of the type {@link ANEURALNETWORKS_FLOAT32}, 1349 * otherwise if all the input tensors have the type {@link 1350 * ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link 1351 * ANEURALNETWORKS_FLOAT16}. 1352 * Since API level 29 there are additional inputs to this op: 1353 * * 23:The input layer normalization weights. 1354 * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs 1355 * to activation at input gate. 1356 * * 24:The forget layer normalization weights. 1357 * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs 1358 * to activation at forget gate. 1359 * * 25:The cell layer normalization weights. 1360 * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs 1361 * to activation at cell gate. 1362 * * 26:The output layer normalization weights. 1363 * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs 1364 * to activation at output gate. 1365 * 1366 * Outputs: 1367 * * 0: The scratch buffer. 1368 * A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or 1369 * [batch_size, num_units * 4] without CIFG. 1370 * * 1: The output state (out) (\f$h_t\f$). 1371 * A 2-D tensor of shape [batch_size, output_size]. 1372 * * 2: The cell state (out) (\f$C_t\f$). 1373 * A 2-D tensor of shape [batch_size, num_units]. 1374 * * 3: The output (\f$o_t\f$). 1375 * A 2-D tensor of shape [batch_size, output_size]. This is effectively 1376 * the same as the current “output state (out)” value. 1377 * 1378 * Available since API level 27. 1379 */ 1380 ANEURALNETWORKS_LSTM = 16, 1381 1382 /** 1383 * Performs an 2-D max pooling operation. 1384 * 1385 * The output dimensions are functions of the filter dimensions, stride, and 1386 * padding. 1387 * 1388 * The values in the output tensor are computed as: 1389 * 1390 * output[b, i, j, channel] = 1391 * max_{di, dj} ( 1392 * input[b, strides[1] * i + di, strides[2] * j + dj, channel] 1393 * ) 1394 * 1395 * Supported tensor {@link OperandCode}: 1396 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1397 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1398 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 1399 * 1400 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 1401 * With the default data layout NHWC, the data is stored in the order of: 1402 * [batch, height, width, channels]. Alternatively, the data layout could 1403 * be NCHW, the data storage order of: [batch, channels, height, width]. 1404 * 1405 * Both explicit padding and implicit padding are supported. 1406 * 1407 * Inputs (explicit padding): 1408 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying 1409 * the input. Since API level 29, zero batches is supported for this 1410 * tensor. 1411 * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 1412 * the left, in the ‘width’ dimension. 1413 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 1414 * the right, in the ‘width’ dimension. 1415 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 1416 * the top, in the ‘height’ dimension. 1417 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 1418 * the bottom, in the ‘height’ dimension. 1419 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 1420 * walking through input in the ‘width’ dimension. 1421 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 1422 * walking through input in the ‘height’ dimension. 1423 * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter 1424 * width. 1425 * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter 1426 * height. 1427 * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 1428 * {@link FuseCode} values. Specifies the activation to 1429 * invoke on the result. 1430 * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 1431 * Set to true to specify NCHW data layout for input0 and output0. 1432 * Available since API level 29. 1433 * 1434 * Inputs (implicit padding): 1435 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying 1436 * the input. Since API level 29, zero batches is supported for this 1437 * tensor. 1438 * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit 1439 * padding scheme, has to be one of the 1440 * {@link PaddingCode} values. 1441 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 1442 * walking through input in the ‘width’ dimension. 1443 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 1444 * walking through input in the ‘height’ dimension. 1445 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter 1446 * width. 1447 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the filter 1448 * height. 1449 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 1450 * {@link FuseCode} values. Specifies the activation to 1451 * invoke on the result. 1452 * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 1453 * Set to true to specify NCHW data layout for input0 and output0. 1454 * Available since API level 29. 1455 * 1456 * Outputs: 1457 * * 0: The output 4-D tensor, of shape 1458 * [batches, out_height, out_width, depth]. 1459 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 1460 * the scale and zeroPoint must be the same as input0. 1461 * 1462 * Available since API level 27. 1463 */ 1464 ANEURALNETWORKS_MAX_POOL_2D = 17, 1465 1466 /** 1467 * Multiplies two tensors, element-wise. 1468 * 1469 * Takes two input tensors of identical {@link OperandCode} and compatible 1470 * dimensions. The output is the product of both input tensors, optionally 1471 * modified by an activation function. 1472 * 1473 * Two dimensions are compatible when: 1474 * 1. they are equal, or 1475 * 2. one of them is 1 1476 * 1477 * The size of the resulting output is the maximum size along each dimension 1478 * of the input operands. It starts with the trailing dimensions, and works 1479 * its way forward. 1480 * 1481 * Since API level 29, generic zero-sized input tensor is supported. Zero 1482 * dimension is only compatible with 0 or 1. The size of the output 1483 * dimension is zero if either of corresponding input dimension is zero. 1484 * 1485 * Supported tensor {@link OperandCode}: 1486 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1487 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1488 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 1489 * 1490 * Supported tensor rank: up to 4 1491 * 1492 * Inputs: 1493 * * 0: A tensor. 1494 * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions 1495 * as input0. 1496 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 1497 * {@link FuseCode} values. Specifies the activation to 1498 * invoke on the result. 1499 * 1500 * Outputs: 1501 * * 0: The product, a tensor of the same {@link OperandCode} as input0. 1502 * For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, 1503 * the following condition must be satisfied: 1504 * output_scale > input1_scale * input2_scale. 1505 * 1506 * Available since API level 27. 1507 */ 1508 ANEURALNETWORKS_MUL = 18, 1509 1510 /** 1511 * Computes rectified linear activation on the input tensor element-wise. 1512 * 1513 * The output is calculated using this formula: 1514 * 1515 * output = max(0, input) 1516 * 1517 * Supported tensor {@link OperandCode}: 1518 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1519 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1520 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 1521 * 1522 * Supported tensor rank: up to 4. 1523 * 1524 * Inputs: 1525 * * 0: A tensor, specifying the input. Since API level 29, this tensor may 1526 * be zero-sized. 1527 * 1528 * Outputs: 1529 * * 0: The output tensor of same shape as input0. 1530 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 1531 * the scale and zeroPoint must be the same as input0. 1532 * 1533 * Available since API level 27. 1534 */ 1535 ANEURALNETWORKS_RELU = 19, 1536 1537 /** 1538 * Computes rectified linear 1 activation on the input tensor element-wise. 1539 * 1540 * The output is calculated using this formula: 1541 * 1542 * output = min(1.f, max(-1.f, input)) 1543 * 1544 * Supported tensor {@link OperandCode}: 1545 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1546 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1547 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 1548 * 1549 * Supported tensor rank: up to 4. 1550 * 1551 * Inputs: 1552 * * 0: A tensor, specifying the input. Since API level 29, this tensor may 1553 * be zero-sized. 1554 * 1555 * Outputs: 1556 * * 0: The output tensor of the same shape as input0. 1557 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 1558 * the scale and zeroPoint must be the same as input0. 1559 * 1560 * Available since API level 27. 1561 */ 1562 ANEURALNETWORKS_RELU1 = 20, 1563 1564 /** 1565 * Computes rectified linear 6 activation on the input tensor element-wise. 1566 * 1567 * The output is calculated using this formula: 1568 * 1569 * output = min(6, max(0, input)) 1570 * 1571 * Supported tensor {@link OperandCode}: 1572 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1573 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1574 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 1575 * 1576 * Supported tensor rank: up to 4. 1577 * 1578 * Inputs: 1579 * * 0: A tensor, specifying the input. Since API level 29, this tensor may 1580 * be zero-sized. 1581 * 1582 * Outputs: 1583 * * 0: The output tensor of same shape as input0. 1584 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 1585 * the scale and zeroPoint must be the same as input0. 1586 * 1587 * Available since API level 27. 1588 */ 1589 ANEURALNETWORKS_RELU6 = 21, 1590 1591 /** 1592 * Reshapes a tensor. 1593 * 1594 * Given tensor, this operation returns a tensor that has the same values as 1595 * tensor, but with a newly specified shape. 1596 * 1597 * Supported tensor {@link OperandCode}: 1598 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1599 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1600 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 1601 * 1602 * Supported tensor rank: up to 4. 1603 * 1604 * Inputs: 1605 * * 0: A tensor, specifying the tensor to be reshaped. 1606 * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, defining the 1607 * shape of the output tensor. The number of elements implied by shape 1608 * must be the same as the number of elements in the input tensor. 1609 * 1610 * If one component of shape is the special value -1, the size of that 1611 * dimension is computed so that the total size remains constant. In 1612 * particular, a shape of [-1] flattens into 1-D. At most one component 1613 * of shape can be -1. 1614 * 1615 * Outputs: 1616 * * 0: The output tensor, of shape specified by the input shape. 1617 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 1618 * the scale and zeroPoint must be the same as input0. 1619 * 1620 * Available since API level 27. 1621 */ 1622 ANEURALNETWORKS_RESHAPE = 22, 1623 1624 /** 1625 * Resizes images to given size using the bilinear interpretation. 1626 * 1627 * Resized images must be distorted if their output aspect ratio is not the 1628 * same as input aspect ratio. The corner pixels of output may not be the 1629 * same as corner pixels of input. 1630 * 1631 * Supported tensor {@link OperandCode}: 1632 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1633 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1634 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29) 1635 * 1636 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 1637 * With the default data layout NHWC, the data is stored in the order of: 1638 * [batch, height, width, channels]. Alternatively, the data layout could 1639 * be NCHW, the data storage order of: [batch, channels, height, width]. 1640 * 1641 * Both resizing by shape and resizing by scale are supported. 1642 * 1643 * Inputs (resizing by shape): 1644 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying 1645 * the input. Since API level 29, zero batches is supported for this 1646 * tensor. 1647 * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output 1648 * width of the output tensor. 1649 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output 1650 * height of the output tensor. 1651 * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 1652 * Set to true to specify NCHW data layout for input0 and output0. 1653 * Available since API level 29. 1654 * 1655 * Inputs (resizing by scale, since API level 29): 1656 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying 1657 * the input. Zero batches is supported for this tensor. 1658 * * 1: A scalar, specifying width_scale, the scaling factor of the width 1659 * dimension from the input tensor to the output tensor. The output 1660 * width is calculated as new_width = floor(width * width_scale). 1661 * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is 1662 * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of 1663 * {@link ANEURALNETWORKS_FLOAT32} otherwise. 1664 * * 2: A scalar, specifying height_scale, the scaling factor of the height 1665 * dimension from the input tensor to the output tensor. The output 1666 * height is calculated as new_height = floor(height * height_scale). 1667 * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is 1668 * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of 1669 * {@link ANEURALNETWORKS_FLOAT32} otherwise. 1670 * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 1671 * Set to true to specify NCHW data layout for input0 and output0. 1672 * 1673 * Outputs: 1674 * * 0: The output 4-D tensor, of shape 1675 * [batches, new_height, new_width, depth]. 1676 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 1677 * the scale and zeroPoint must be the same as input0. 1678 * 1679 * Available since API level 27. 1680 */ 1681 ANEURALNETWORKS_RESIZE_BILINEAR = 23, 1682 1683 /** 1684 * A basic recurrent neural network layer. 1685 * 1686 * This layer implements the operation: 1687 * outputs = state = activation(inputs * input_weights + 1688 * state * recurrent_weights + bias) 1689 * 1690 * Where: 1691 * * “input_weights” is a weight matrix that multiplies the inputs; 1692 * * “recurrent_weights” is a weight matrix that multiplies the current 1693 * “state” which itself is the output from the previous time step 1694 * computation; 1695 * * “bias” is a bias vector (added to each output vector in the batch); 1696 * * “activation” is the function passed as the “fused_activation_function” 1697 * argument (if not “NONE”). 1698 * 1699 * Supported tensor {@link OperandCode}: 1700 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1701 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1702 * 1703 * The input tensors must all be the same type. 1704 * 1705 * Inputs: 1706 * * 0: input. 1707 * A 2-D tensor of shape [batch_size, input_size], where “batch_size” 1708 * corresponds to the batching dimension, and “input_size” is the size 1709 * of the input. 1710 * * 1: weights. 1711 * A 2-D tensor of shape [num_units, input_size], where “num_units” 1712 * corresponds to the number of units. 1713 * * 2: recurrent_weights. 1714 * A 2-D tensor of shape [num_units, num_units], with columns 1715 * corresponding to the weights from each unit. 1716 * * 3: bias. 1717 * A 1-D tensor of shape [num_units]. 1718 * * 4: hidden state (in). 1719 * A 2-D tensor of shape [batch_size, num_units]. 1720 * * 5: fused_activation_function. 1721 * An optional {@link FuseCode} value indicating the 1722 * activation function. If “NONE” is specified then it results in a 1723 * linear activation. 1724 * 1725 * Outputs: 1726 * * 0: hidden state (out). 1727 * A 2-D tensor of shape [batch_size, num_units]. 1728 * 1729 * * 1: output. 1730 * A 2-D tensor of shape [batch_size, num_units]. This is effectively 1731 * the same as the current state value. 1732 * 1733 * Available since API level 27. 1734 */ 1735 ANEURALNETWORKS_RNN = 24, 1736 1737 /** 1738 * Computes the softmax activation on the input tensor element-wise, per 1739 * batch, by normalizing the input vector so the maximum coefficient is 1740 * zero. 1741 * 1742 * The output is calculated using this formula: 1743 * 1744 * output[batch, i] = 1745 * exp((input[batch, i] - max(input[batch, :])) * beta) / 1746 * sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)} 1747 * 1748 * For input tensor with rank other than 2, the activation will be applied 1749 * independently on each 1-D slice along specified dimension. 1750 * 1751 * Supported tensor {@link OperandCode}: 1752 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1753 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1754 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 1755 * 1756 * Supported tensor rank: up to 4. 1757 * Tensors with rank other than 2 or 4 are only supported since API level 29. 1758 * 1759 * Inputs: 1760 * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped. Since 1761 * API level 29, this tensor may be zero-sized. 1762 * * 1: A scalar, specifying the positive scaling factor for the exponent, 1763 * beta. If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT32} or 1764 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the scalar must be of 1765 * {@link ANEURALNETWORKS_FLOAT32}. If input0 is of {@link 1766 * ANEURALNETWORKS_TENSOR_FLOAT16}, then the scalar must be of {@link 1767 * ANEURALNETWORKS_FLOAT16}. 1768 * * 2: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1, 1769 * specifying the dimension the activation would be performed on. 1770 * Negative index is used to specify axis from the end (e.g. -1 for 1771 * the last axis). Must be in the range [-n, n). 1772 * Available since API level 29. 1773 * 1774 * Outputs: 1775 * * 0: The output tensor of same shape as input0. 1776 * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, 1777 * the scale must be 1.f / 256 and the zeroPoint must be 0. 1778 * 1779 * Available since API level 27. 1780 */ 1781 ANEURALNETWORKS_SOFTMAX = 25, 1782 1783 /** 1784 * Rearranges blocks of spatial data, into depth. 1785 * 1786 * More specifically, this op outputs a copy of the input tensor where 1787 * values from the height and width dimensions are moved to the depth 1788 * dimension. The value block_size indicates the input block size and how 1789 * the data is moved. 1790 * 1791 * Chunks of data of size block_size * block_size from depth are rearranged 1792 * into non-overlapping blocks of size block_size x block_size. 1793 * 1794 * The depth of the output tensor is input_depth * block_size * block_size. 1795 * The input tensor's height and width must be divisible by block_size. 1796 * 1797 * Supported tensor {@link OperandCode}: 1798 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1799 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1800 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 1801 * 1802 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 1803 * With the default data layout NHWC, the data is stored in the order of: 1804 * [batch, height, width, channels]. Alternatively, the data layout could 1805 * be NCHW, the data storage order of: [batch, channels, height, width]. 1806 * 1807 * Inputs: 1808 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], 1809 * specifying the input. 1810 * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size. 1811 * block_size must be >=1 and block_size must be a divisor of both the 1812 * input height and width. 1813 * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 1814 * Set to true to specify NCHW data layout for input0 and output0. 1815 * Available since API level 29. 1816 * 1817 * Outputs: 1818 * * 0: The output 4-D tensor, of shape [batches, height/block_size, 1819 * width/block_size, depth_in*block_size*block_size]. 1820 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 1821 * the scale and zeroPoint must be the same as input0. 1822 * 1823 * Available since API level 27. 1824 */ 1825 ANEURALNETWORKS_SPACE_TO_DEPTH = 26, 1826 1827 /** 1828 * SVDF op is a kind of stateful layer derived from the notion that a 1829 * densely connected layer that's processing a sequence of input frames can 1830 * be approximated by using a singular value decomposition of each of its 1831 * nodes. The implementation is based on: 1832 * 1833 * https://research.google.com/pubs/archive/43813.pdf 1834 * 1835 * P. Nakkiran, R. Alvarez, R. Prabhavalkar, C. Parada. 1836 * “Compressing Deep Neural Networks using a Rank-Constrained Topology”. 1837 * INTERSPEECH, 2015. 1838 * 1839 * It processes the incoming input using a 2-stage filtering mechanism: 1840 * * stage 1 performs filtering on the "features" dimension, whose outputs 1841 * get pushed into a memory of fixed-size memory_size. 1842 * * stage 2 performs filtering on the "time" dimension of the memory_size 1843 * memoized outputs of stage 1. 1844 * 1845 * Specifically, for rank 1, this layer implements the operation: 1846 * 1847 * memory = push(conv1d(inputs, weights_feature, feature_dim, 1848 * "ANEURALNETWORKS_PADDING_VALID")); 1849 * outputs = activation(memory * weights_time + bias); 1850 * 1851 * Where: 1852 * * “weights_feature” is a weights matrix that processes the inputs (by 1853 * convolving the input with every “feature filter”), and whose outputs 1854 * get pushed, stacked in order, into the fixed-size “memory” (the oldest 1855 * entry gets dropped); 1856 * * “weights_time” is a weights matrix that processes the “memory” (by a 1857 * batched matrix multiplication on the num_units); 1858 * * “bias” is an optional bias vector (added to each output vector in the 1859 * batch); and 1860 * * “activation” is the function passed as the “fused_activation_function” 1861 * argument (if not “NONE”). 1862 * 1863 * Each rank adds a dimension to the weights matrices by means of stacking 1864 * the filters. 1865 * 1866 * Supported tensor {@link OperandCode}: 1867 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1868 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1869 * 1870 * All input tensors must be the same type. 1871 * 1872 * Inputs: 1873 * * 0: input. 1874 * A 2-D tensor of shape [batch_size, input_size], where “batch_size” 1875 * corresponds to the batching dimension, and “input_size” is the size 1876 * of the input. 1877 * * 1: weights_feature. 1878 * A 2-D tensor of shape [num_units, input_size], where “num_units” 1879 * corresponds to the number of units. 1880 * * 2: weights_time. 1881 * A 2-D tensor of shape [num_units, memory_size], where “memory_size” 1882 * corresponds to the fixed-size of the memory. 1883 * * 3: bias. 1884 * An optional 1-D tensor of shape [num_units]. 1885 * * 4: state (in). 1886 * A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank]. 1887 * * 5: rank. 1888 * The rank of the SVD approximation. 1889 * * 6: fused_activation_function. 1890 * An optional {@link FuseCode} value indicating the 1891 * activation function. If “NONE” is specified then it results in a 1892 * linear activation. 1893 * 1894 * Outputs: 1895 * * 0: state (out). 1896 * A 2-D tensor of the same {@link OperandCode} as the inputs, with shape 1897 * [batch_size, (memory_size - 1) * num_units * rank]. 1898 * * 1: output. 1899 * A 2-D tensor of the same {@link OperandCode} as the inputs, with shape 1900 * [batch_size, num_units]. 1901 * 1902 * Available since API level 27. 1903 */ 1904 ANEURALNETWORKS_SVDF = 27, 1905 1906 /** 1907 * Computes hyperbolic tangent of input tensor element-wise. 1908 * 1909 * The output is calculated using this formula: 1910 * 1911 * output = tanh(input) 1912 * 1913 * Supported tensor {@link OperandCode}: 1914 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1915 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1916 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29) 1917 * 1918 * Supported tensor rank: up to 4. 1919 * 1920 * Inputs: 1921 * * 0: A tensor, specifying the input. Since API level 29, this tensor may 1922 * be zero-sized. 1923 * 1924 * Outputs: 1925 * * 0: The output tensor of same shape as input0. 1926 * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, 1927 * the scale must be 1.f / 128 and the zeroPoint must be 128. 1928 * 1929 * Available since API level 27. 1930 */ 1931 ANEURALNETWORKS_TANH = 28, 1932 1933 // Operations below are available since API level 28. 1934 1935 // TODO: make the description easier to understand. 1936 /** 1937 * BatchToSpace for N-dimensional tensors. 1938 * 1939 * This operation reshapes the batch dimension (dimension 0) into M + 1 1940 * dimensions of shape block_shape + [batch], interleaves these blocks back 1941 * into the grid defined by the spatial dimensions [1, ..., M], to obtain a 1942 * result with the same rank as the input. 1943 * 1944 * This is the reverse of SpaceToBatch. 1945 * 1946 * Supported tensor {@link OperandCode}: 1947 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 1948 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 1949 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 1950 * 1951 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 1952 * With the default data layout NHWC, the data is stored in the order of: 1953 * [batch, height, width, channels]. Alternatively, the data layout could 1954 * be NCHW, the data storage order of: [batch, channels, height, width]. 1955 * 1956 * Inputs: 1957 * * 0: An n-D tensor, specifying the tensor to be reshaped 1958 * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block 1959 * sizes for each spatial dimension of the input tensor. All values 1960 * must be >= 1. 1961 * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 1962 * Set to true to specify NCHW data layout for input0 and output0. 1963 * Available since API level 29. 1964 * 1965 * Outputs: 1966 * * 0: A tensor of the same {@link OperandCode} as input0. 1967 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 1968 * the scale and zeroPoint must be the same as input0. 1969 * 1970 * Available since API level 28. 1971 */ 1972 ANEURALNETWORKS_BATCH_TO_SPACE_ND = 29, 1973 1974 /** 1975 * Element-wise division of two tensors. 1976 * 1977 * Takes two input tensors of identical {@link OperandCode} and compatible 1978 * dimensions. The output is the result of dividing the first input tensor 1979 * by the second, optionally modified by an activation function. 1980 * 1981 * Two dimensions are compatible when: 1982 * 1. they are equal, or 1983 * 2. one of them is 1 1984 * 1985 * The size of the output is the maximum size along each dimension of the 1986 * input operands. It starts with the trailing dimensions, and works its way 1987 * forward. 1988 * 1989 * Example: 1990 * input1.dimension = {4, 1, 2} 1991 * input2.dimension = {5, 4, 3, 1} 1992 * output.dimension = {5, 4, 3, 2} 1993 * 1994 * Since API level 29, generic zero-sized input tensor is supported. Zero 1995 * dimension is only compatible with 0 or 1. The size of the output 1996 * dimension is zero if either of corresponding input dimension is zero. 1997 * 1998 * Supported tensor {@link OperandCode}: 1999 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 2000 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2001 * 2002 * Supported tensor rank: up to 4 2003 * 2004 * Inputs: 2005 * * 0: An n-D tensor, specifying the first input. 2006 * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions 2007 * as input0. 2008 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 2009 * {@link FuseCode} values. Specifies the activation to 2010 * invoke on the result. 2011 * 2012 * Outputs: 2013 * * 0: A tensor of the same {@link OperandCode} as input0. 2014 * 2015 * Available since API level 28. 2016 */ 2017 ANEURALNETWORKS_DIV = 30, 2018 2019 /** 2020 * Computes the mean of elements across dimensions of a tensor. 2021 * 2022 * Reduces the input tensor along the given dimensions to reduce. Unless 2023 * keep_dims is true, the rank of the tensor is reduced by 1 for each entry 2024 * in axis. If keep_dims is true, the reduced dimensions are retained with 2025 * length 1. 2026 * 2027 * Supported tensor {@link OperandCode}: 2028 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 2029 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2030 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 2031 * 2032 * Supported tensor rank: up to 4 2033 * 2034 * Inputs: 2035 * * 0: A tensor, specifying the input. 2036 * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions 2037 * to reduce. Must be in the range 2038 * [-rank(input_tensor), rank(input_tensor)). 2039 * 2040 * NOTE: When the operation was introduced, the documentation 2041 * incorrectly stated that if dimensions were empty, the operation 2042 * would reduce across all dimensions. This behavior was never 2043 * implemented. 2044 * 2045 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, keep_dims. If positive, 2046 * retains reduced dimensions with length 1. 2047 * 2048 * Outputs: 2049 * * 0: A tensor of the same {@link OperandCode} as input0. 2050 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 2051 * the scale and zeroPoint must be same as input0. 2052 * 2053 * Available since API level 28. 2054 */ 2055 ANEURALNETWORKS_MEAN = 31, 2056 2057 /** 2058 * Pads a tensor with zeros. 2059 * 2060 * This operation pads a tensor according to the specified paddings. 2061 * 2062 * Supported tensor {@link OperandCode}: 2063 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 2064 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2065 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (full support since API 2066 * level 29, see the output section) 2067 * 2068 * Supported tensor rank: up to 4 2069 * 2070 * Inputs: 2071 * * 0: An n-D tensor, specifying the tensor to be padded. 2072 * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings 2073 * for each spatial dimension of the input tensor. The shape of the 2074 * tensor must be {rank(input0), 2}. 2075 * padding[i, 0] specifies the number of elements to be padded in the 2076 * front of dimension i. 2077 * padding[i, 1] specifies the number of elements to be padded after the 2078 * end of dimension i. 2079 * 2080 * Outputs: 2081 * * 0: A tensor of the same {@link OperandCode} as input0. The 2082 * output tensor has the same rank as input0, and each 2083 * dimension of the output tensor has the same size as the 2084 * corresponding dimension of the input tensor plus the size 2085 * of the padding: 2086 * output0.dimension[i] = 2087 * padding[i, 0] + input0.dimension[i] + padding[i, 1] 2088 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 2089 * the scale and zeroPoint must be the same as input0. 2090 * 2091 * NOTE: Before API level 29, the pad value for 2092 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined. 2093 * Since API level 29, the pad value is always the logical zero. 2094 * 2095 * Available since API level 28. 2096 */ 2097 ANEURALNETWORKS_PAD = 32, 2098 2099 // TODO: make the description easier to understand. 2100 /** 2101 * SpaceToBatch for N-Dimensional tensors. 2102 * 2103 * This operation divides "spatial" dimensions [1, ..., M] of the input into 2104 * a grid of blocks of shape block_shape, and interleaves these blocks with 2105 * the "batch" dimension (0) such that in the output, the spatial dimensions 2106 * [1, ..., M] correspond to the position within the grid, and the batch 2107 * dimension combines both the position within a spatial block and the 2108 * original batch position. Prior to division into blocks, the spatial 2109 * dimensions of the input are optionally zero padded according to paddings. 2110 * 2111 * Supported tensor {@link OperandCode}: 2112 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 2113 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2114 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (full support since API 2115 * level 29, see the output section) 2116 * 2117 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 2118 * With the default data layout NHWC, the data is stored in the order of: 2119 * [batch, height, width, channels]. Alternatively, the data layout could 2120 * be NCHW, the data storage order of: [batch, channels, height, width]. 2121 * 2122 * Inputs: 2123 * * 0: An n-D tensor, specifying the input. 2124 * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block 2125 * sizes for each spatial dimension of the input tensor. All values 2126 * must be >= 1. 2127 * * 2: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings 2128 * for each spatial dimension of the input tensor. All values must be 2129 * >= 0. The shape of the tensor must be {M, 2}, where M is the number 2130 * of spatial dimensions. 2131 * padding[i, 0] specifies the number of element to be padded in the 2132 * front of dimension i. 2133 * padding[i, 1] specifies the number of element to be padded after the 2134 * end of dimension i. 2135 * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false. 2136 * Set to true to specify NCHW data layout for input0 and output0. 2137 * Available since API level 29. 2138 * 2139 * Outputs: 2140 * * 0: A tensor of the same {@link OperandCode} as input0. 2141 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 2142 * the scale and zeroPoint must be the same as input0. 2143 * 2144 * NOTE: Before API level 29, the pad value for 2145 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined. 2146 * Since API level 29, the pad value is always the logical zero. 2147 * 2148 * Available since API level 28. 2149 */ 2150 ANEURALNETWORKS_SPACE_TO_BATCH_ND = 33, 2151 2152 /** 2153 * Removes dimensions of size 1 from the shape of a tensor. 2154 * 2155 * Given a tensor input, this operation returns a tensor of the same 2156 * {@link OperandCode} with all dimensions of size 1 removed. If you don't 2157 * want to remove all size 1 dimensions, you can remove specific size 1 2158 * dimensions by specifying the axes (input1). 2159 * 2160 * Supported tensor {@link OperandCode}: 2161 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 2162 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2163 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 2164 * 2165 * Supported tensor rank: up to 4 2166 * 2167 * Inputs: 2168 * * 0: An n-D tensor, the tensor to be squeezed. 2169 * * 1: An optional 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The 2170 * dimensions to squeeze. If specified only squeezes the dimensions 2171 * listed. Otherwise, squeezes all dimensions. The dimension index 2172 * starts at 0. An error must be reported if squeezing a dimension that 2173 * is not 1. 2174 * 2175 * Outputs: 2176 * * 0: A tensor of the same {@link OperandCode} as input0. Contains the 2177 * same data as input, but has one or more dimensions of size 1 2178 * removed. 2179 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 2180 * the scale and zeroPoint must be the same as input0. 2181 * 2182 * Available since API level 28. 2183 */ 2184 ANEURALNETWORKS_SQUEEZE = 34, 2185 2186 /** 2187 * Extracts a strided slice of a tensor. 2188 * 2189 * Roughly speaking, this op extracts a slice of size (end - begin) / stride 2190 * from the given input tensor. Starting at the location specified by begin 2191 * the slice continues by adding stride to the index until all dimensions 2192 * are not less than end. Note that a stride can be negative, which causes a 2193 * reverse slice. 2194 * 2195 * Supported tensor {@link OperandCode}: 2196 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 2197 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2198 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 2199 * 2200 * Supported tensor rank: up to 4 2201 * 2202 * Inputs: 2203 * * 0: An n-D tensor, specifying the tensor to be sliced. 2204 * * 1: begin, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The 2205 * starts of the dimensions of the input tensor to be sliced. The 2206 * length must be of rank(input0). 2207 * * 2: end, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The 2208 * ends of the dimensions of the input tensor to be sliced. The length 2209 * must be of rank(input0). 2210 * * 3: strides, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The 2211 * strides of the dimensions of the input tensor to be sliced. The 2212 * length must be of rank(input0). The entries must be non-zero. 2213 * * 4: begin_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit 2214 * of begin_mask is set, begin[i] is ignored and the fullest possible 2215 * range in that dimension is used instead. 2216 * * 5: end_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit of 2217 * end_mask is set, end[i] is ignored and the fullest possible range in 2218 * that dimension is used instead. 2219 * * 6: shrink_axis_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the 2220 * ith bit of shrink_axis_mask is set, the ith dimension specification 2221 * shrinks the dimensionality by 1, taking on the value at index 2222 * begin[i]. In this case, the ith specification must define a 2223 * slice of size 1, e.g. begin[i] = x, end[i] = x + 1. 2224 * 2225 * Outputs: 2226 * * 0: A tensor of the same {@link OperandCode} as input0 and rank (n - k), 2227 * where k is the number of bits set in shrink_axis_mask. 2228 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 2229 * the scale and zeroPoint must be the same as input0. 2230 * 2231 * Available since API level 28. 2232 */ 2233 ANEURALNETWORKS_STRIDED_SLICE = 35, 2234 2235 /** 2236 * Element-wise subtraction of two tensors. 2237 * 2238 * Takes two input tensors of identical {@link OperandCode} and compatible 2239 * dimensions. The output is the result of subtracting the second input 2240 * tensor from the first one, optionally modified by an activation function. 2241 * 2242 * Two dimensions are compatible when: 2243 * 1. they are equal, or 2244 * 2. one of them is 1 2245 * 2246 * The size of the output is the maximum size along each dimension of the 2247 * input operands. It starts with the trailing dimensions, and works its way 2248 * forward. 2249 * 2250 * Example: 2251 * input1.dimension = {4, 1, 2} 2252 * input2.dimension = {5, 4, 3, 1} 2253 * output.dimension = {5, 4, 3, 2} 2254 * 2255 * Since API level 29, generic zero-sized input tensor is supported. Zero 2256 * dimension is only compatible with 0 or 1. The size of the output 2257 * dimension is zero if either of corresponding input dimension is zero. 2258 * 2259 * Supported tensor {@link OperandCode}: 2260 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 2261 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2262 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29) 2263 * 2264 * Supported tensor rank: up to 4 2265 * 2266 * Inputs: 2267 * * 0: An n-D tensor, specifying the first input. 2268 * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions 2269 * as input0. 2270 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 2271 * {@link FuseCode} values. Specifies the activation to 2272 * invoke on the result. 2273 * 2274 * Outputs: 2275 * * 0: A tensor of the same {@link OperandCode} as input0. 2276 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 2277 * the scale and zeroPoint can be different from inputs' scale and zeroPoint. 2278 * 2279 * Available since API level 28. 2280 */ 2281 ANEURALNETWORKS_SUB = 36, 2282 2283 /** 2284 * Transposes the input tensor, permuting the dimensions according to the 2285 * perm tensor. 2286 * 2287 * The returned tensor's dimension i corresponds to the input dimension 2288 * perm[i]. If perm is not given, it is set to (n-1...0), where n is the 2289 * rank of the input tensor. Hence by default, this operation performs a 2290 * regular matrix transpose on 2-D input Tensors. 2291 * 2292 * Supported tensor {@link OperandCode}: 2293 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 2294 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2295 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 2296 * 2297 * Supported tensor rank: up to 4 2298 * 2299 * Inputs: 2300 * * 0: An n-D tensor, specifying the tensor to be transposed. 2301 * Since API level 29, this tensor may be zero-sized. 2302 * * 1: An optional 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, 2303 * the permutation of the dimensions of the input tensor. 2304 * 2305 * Outputs: 2306 * * 0: A tensor of the same {@link OperandCode} as input0. 2307 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 2308 * the scale and zeroPoint must be the same as input0. 2309 * 2310 * Available since API level 28. 2311 */ 2312 ANEURALNETWORKS_TRANSPOSE = 37, 2313 2314 // Operations below are available since API level 29. 2315 2316 /** 2317 * Computes the absolute value of a tensor, element-wise. 2318 * 2319 * Supported tensor {@link OperandCode}: 2320 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 2321 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2322 * 2323 * Supported tensor rank: from 1. 2324 * 2325 * Inputs: 2326 * * 0: A tensor. 2327 * 2328 * Outputs: 2329 * * 0: The output tensor of same shape as input0. 2330 * 2331 * Available since API level 29. 2332 */ 2333 ANEURALNETWORKS_ABS = 38, 2334 2335 /** 2336 * Returns the index of the largest element along an axis. 2337 * 2338 * Supported tensor {@link OperandCode}: 2339 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 2340 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2341 * * {@link ANEURALNETWORKS_TENSOR_INT32} 2342 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 2343 * 2344 * Supported tensor rank: from 1 2345 * 2346 * Inputs: 2347 * * 0: An n-D tensor specifying the input. Must be non-empty. 2348 * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to 2349 * reduce across. Negative index is used to specify axis from the 2350 * end (e.g. -1 for the last axis). Must be in the range [-n, n). 2351 * 2352 * Outputs: 2353 * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor. 2354 * 2355 * Available since API level 29. 2356 */ 2357 // There is no underscore in ARG_MAX to avoid name conflict with 2358 // the macro defined in libc/kernel/uapi/linux/limits.h. 2359 ANEURALNETWORKS_ARGMAX = 39, 2360 2361 /** 2362 * Returns the index of the smallest element along an axis. 2363 * 2364 * Supported tensor {@link OperandCode}: 2365 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 2366 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2367 * * {@link ANEURALNETWORKS_TENSOR_INT32} 2368 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 2369 * 2370 * Supported tensor rank: from 1 2371 * 2372 * Inputs: 2373 * * 0: An n-D tensor specifying the input. Must be non-empty. 2374 * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to 2375 * reduce across. Negative index is used to specify axis from the 2376 * end (e.g. -1 for the last axis). Must be in the range [-n, n). 2377 * 2378 * Outputs: 2379 * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor. 2380 * 2381 * Available since API level 29. 2382 */ 2383 ANEURALNETWORKS_ARGMIN = 40, // See ARGMAX for naming discussion. 2384 2385 /** 2386 * Transform axis-aligned bounding box proposals using bounding box deltas. 2387 * 2388 * Given the positions of bounding box proposals and the corresponding 2389 * bounding box deltas for each class, return the refined bounding box 2390 * regions. The resulting bounding boxes are cliped against the edges of 2391 * the image. 2392 * 2393 * Supported tensor {@link OperandCode}: 2394 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 2395 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2396 * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM} 2397 * 2398 * Inputs: 2399 * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the 2400 * bounding box proposals, each line with format [x1, y1, x2, y2]. 2401 * For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, 2402 * the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois 2403 * is supported for this tensor. 2404 * * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the 2405 * bounding box delta for each region of interest and each class. The 2406 * bounding box deltas are organized in the following order 2407 * [dx, dy, dw, dh], where dx and dy is the relative correction factor 2408 * for the center position of the bounding box with respect to the width 2409 * and height, dw and dh is the log-scale relative correction factor 2410 * for the width and height. For input0 of type 2411 * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, this tensor should be 2412 * of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}. Zero num_rois is 2413 * supported for this tensor. 2414 * * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape 2415 * [num_rois], specifying the batch index of each box. Boxes with 2416 * the same batch index are grouped together. Zero num_rois is 2417 * supported for this tensor. 2418 * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of 2419 * each image in the batch, each line with format 2420 * [image_height, image_width]. 2421 * 2422 * Outputs: 2423 * * 0: A tensor of the same {@link OperandCode} as input0, with shape 2424 * [num_rois, num_classes * 4], specifying the coordinates of each 2425 * output bounding box for each class, with format [x1, y1, x2, y2]. 2426 * For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the 2427 * scale must be 0.125 and the zero point must be 0. 2428 * 2429 * Available since API level 29. 2430 */ 2431 ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM = 41, 2432 2433 /** 2434 * Performs a forward LSTM on the input followed by a backward LSTM. 2435 * 2436 * Supported tensor {@link OperandCode}: 2437 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 2438 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2439 * 2440 * Supported tensor rank: 3, either time-major or batch-major. 2441 * 2442 * All input and output tensors must be of the same type. 2443 * 2444 * 2445 * Inputs: 2446 * * 0: The input. 2447 * A 3-D tensor of shape: 2448 * If time-major: [max_time, batch_size, input_size] 2449 * If batch-major: [batch_size, max_time, input_size] 2450 * where "max_time" is the number of timesteps (sequence length), 2451 * "batch_size" corresponds to the batching dimension, and 2452 * "input_size" is the size of the input. 2453 * * 1: The forward input-to-input weights. Optional. 2454 * A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units” 2455 * corresponds to the number of forward cell units. 2456 * * 2: The forward input-to-forget weights. 2457 * A 2-D tensor of shape [fw_num_units, input_size]. 2458 * * 3: The forward input-to-cell weights. 2459 * A 2-D tensor of shape [fw_num_units, input_size]. 2460 * * 4: The forward input-to-output weights. 2461 * A 2-D tensor of shape [fw_num_units, input_size]. 2462 * * 5: The forward recurrent-to-input weights. Optional. 2463 * A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size” 2464 * corresponds to either the number of cell units (i.e., fw_num_units), 2465 * or the second dimension of the “fw_projection_weights”, if defined. 2466 * * 6: The forward recurrent-to-forget weights. 2467 * A 2-D tensor of shape [fw_num_units, fw_output_size]. 2468 * * 7: The forward recurrent-to-cell weights. 2469 * A 2-D tensor of shape [fw_num_units, fw_output_size]. 2470 * * 8: The forward recurrent-to-output weights. 2471 * A 2-D tensor of shape [fw_num_units, fw_output_size]. 2472 * * 9: The forward cell-to-input weights. Optional. 2473 * A 1-D tensor of shape [fw_num_units]. 2474 * * 10: The forward cell-to-forget weights. Optional. 2475 * A 1-D tensor of shape [fw_num_units]. 2476 * * 11: The forward cell-to-output weights. Optional. 2477 * A 1-D tensor of shape [fw_num_units]. 2478 * * 12: The forward input gate bias. Optional. 2479 * A 1-D tensor of shape [fw_num_units]. 2480 * * 13: The forward forget gate bias. 2481 * A 1-D tensor of shape [fw_num_units]. 2482 * * 14: The forward cell gate bias. 2483 * A 1-D tensor of shape [fw_num_units]. 2484 * * 15: The forward output gate bias. 2485 * A 1-D tensor of shape [fw_num_units]. 2486 * * 16: The forward projection weights. Optional. 2487 * A 2-D tensor of shape [fw_output_size, fw_num_units]. 2488 * * 17: The forward projection bias. Optional. 2489 * A 1-D tensor of shape [fw_output_size]. 2490 * * 18: The backward input-to-input weights. Optional. 2491 * A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units” 2492 * corresponds to the number of backward cell units. 2493 * * 19: The backward input-to-forget weights. 2494 * A 2-D tensor of shape [bw_num_units, input_size]. 2495 * * 20: The backward input-to-cell weights. 2496 * A 2-D tensor of shape [bw_num_units, input_size]. 2497 * * 21: The backward input-to-output weights. 2498 * A 2-D tensor of shape [bw_num_units, input_size]. 2499 * * 22: The backward recurrent-to-input weights. Optional. 2500 * A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size” 2501 * corresponds to either the number of cell units (i.e., “bw_num_units”), 2502 * or the second dimension of the “bw_projection_weights”, if defined. 2503 * * 23: The backward recurrent-to-forget weights. 2504 * A 2-D tensor of shape [bw_num_units, bw_output_size]. 2505 * * 24: The backward recurrent-to-cell weights. 2506 * A 2-D tensor of shape [bw_num_units, bw_output_size]. 2507 * * 25: The backward recurrent-to-output weights. 2508 * A 2-D tensor of shape [bw_num_units, bw_output_size]. 2509 * * 26: The backward cell-to-input weights. Optional. 2510 * A 1-D tensor of shape [bw_num_units]. 2511 * * 27: The backward cell-to-forget weights. Optional. 2512 * A 1-D tensor of shape [bw_num_units]. 2513 * * 28: The backward cell-to-output weights. Optional. 2514 * A 1-D tensor of shape [bw_num_units]. 2515 * * 29: The backward input gate bias. Optional. 2516 * A 1-D tensor of shape [bw_num_units]. 2517 * * 30: The backward forget gate bias. 2518 * A 1-D tensor of shape [bw_num_units]. 2519 * * 31: The backward cell gate bias. 2520 * A 1-D tensor of shape [bw_num_units]. 2521 * * 32: The backward output gate bias. 2522 * A 1-D tensor of shape [bw_num_units]. 2523 * * 33: The backward projection weights. Optional. 2524 * A 2-D tensor of shape [bw_output_size, bw_num_units]. 2525 * * 34: The backward projection bias. Optional. 2526 * A 1-D tensor of shape [bw_output_size]. 2527 * * 35: The forward input activation state. 2528 * A 2-D tensor of shape [batch_size, bw_output_size]. 2529 * * 36: The forward input cell state. 2530 * A 2-D tensor of shape [batch_size, bw_num_units]. 2531 * * 37: The backward input activation state. 2532 * A 2-D tensor of shape [batch_size, bw_output_size]. 2533 * * 38: The backward input cell state. 2534 * A 2-D tensor of shape [batch_size, bw_num_units]. 2535 * * 39: The auxiliary input. Optional. 2536 * A 3-D tensor of shape [max_time, batch_size, input_size], where “batch_size” 2537 * corresponds to the batching dimension, and “input_size” is the size 2538 * of the input. 2539 * * 40: The forward auxiliary input-to-input weights. Optional. 2540 * A 2-D tensor of shape [fw_num_units, input_size]. 2541 * * 41: The forward auxiliary input-to-forget weights. Optional. 2542 * A 2-D tensor of shape [fw_num_units, input_size]. 2543 * * 42: The forward auxiliary input-to-cell weights. Optional. 2544 * A 2-D tensor of shape [fw_num_units, input_size]. 2545 * * 43: The forward auxiliary input-to-output weights. Optional. 2546 * A 2-D tensor of shape [fw_num_units, input_size]. 2547 * * 44: The backward auxiliary input-to-input weights. Optional. 2548 * A 2-D tensor of shape [bw_num_units, input_size]. 2549 * * 45: The backward auxiliary input-to-forget weights. Optional. 2550 * A 2-D tensor of shape [bw_num_units, input_size]. 2551 * * 46: The backward auxiliary input-to-cell weights. Optional. 2552 * A 2-D tensor of shape [bw_num_units, input_size]. 2553 * * 47: The backward auxiliary input-to-output weights. Optional. 2554 * A 2-D tensor of shape [bw_num_units, input_size]. 2555 * * 48: The activation function. 2556 * A value indicating the activation function: 2557 * <ul> 2558 * <li>0: None; 2559 * <li>1: Relu; 2560 * <li>3: Relu6; 2561 * <li>4: Tanh; 2562 * <li>6: Sigmoid. 2563 * </ul> 2564 * * 49: The clipping threshold for the cell state, such 2565 * that values are bound within [-cell_clip, cell_clip]. If set to 0.0 2566 * then clipping is disabled. 2567 * If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, 2568 * this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32}, 2569 * otherwise if all the input tensors have the type {@link 2570 * ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link 2571 * ANEURALNETWORKS_FLOAT16}. 2572 * * 50: The clipping threshold for the output from the 2573 * projection layer, such that values are bound within 2574 * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled. 2575 * If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, 2576 * this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32}, 2577 * otherwise if all the input tensors have the type {@link 2578 * ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link 2579 * ANEURALNETWORKS_FLOAT16}. 2580 * * 51: merge_outputs 2581 * An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs 2582 * from forward and backward cells should be merged. 2583 * * 52: time_major 2584 * An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format 2585 * of input and output tensors. 2586 * * 53: The forward input layer normalization weights. Optional. 2587 * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs 2588 * to activation at input gate. 2589 * * 54: The forward forget layer normalization weights. Optional. 2590 * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs 2591 * to activation at forget gate. 2592 * * 55: The forward cell layer normalization weights. Optional. 2593 * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs 2594 * to activation at cell gate. 2595 * * 56: The forward output layer normalization weights. Optional. 2596 * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs 2597 * to activation at output gate. 2598 * * 57: The backward input layer normalization weights. Optional. 2599 * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs 2600 * to activation at input gate. 2601 * * 58: The backward forget layer normalization weights. Optional. 2602 * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs 2603 * to activation at forget gate. 2604 * * 59: The backward cell layer normalization weights. Optional. 2605 * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs 2606 * to activation at cell gate. 2607 * * 60: The backward output layer normalization weights. Optional. 2608 * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs 2609 * to activation at output gate. 2610 * 2611 * Outputs: 2612 * * 0: The forward output. 2613 * A 3-D tensor of shape: 2614 * If time-major and not merge_outputs: 2615 * [max_time, batch_size, fw_output_size] 2616 * If time-major and merge_outputs: 2617 * [max_time, batch_size, fw_output_size + bw_output_size] 2618 * If batch-major and not merge_outputs: 2619 * [batch_size, max_time, fw_output_size] 2620 * If batch-major and merge_outputs: 2621 * [batch_size, max_time, fw_output_size + bw_output_size] 2622 * * 1: The backward output. Unused if merge_outputs is true. 2623 * A 3-D tensor of shape: 2624 * If time-major: [max_time, batch_size, bw_output_size] 2625 * If batch-major: [batch_size, max_time, bw_output_size] 2626 * 2627 * Available since API level 29. 2628 */ 2629 ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM = 42, 2630 2631 /** 2632 * A recurrent neural network layer that applies a basic RNN cell to a 2633 * sequence of inputs in forward and backward directions. 2634 * 2635 * This Op unrolls the input along the sequence dimension, and implements 2636 * the following operation for each element in the sequence s = 2637 * 1...sequence_length: 2638 * fw_outputs[s] = fw_state = activation(inputs[s] * fw_input_weights’ + 2639 * fw_state * fw_recurrent_weights’ + fw_bias) 2640 * 2641 * And for each element in sequence t = sequence_length : 1 2642 * bw_outputs[t] = bw_state = activation(inputs[t] * bw_input_weights’ + 2643 * bw_state * bw_recurrent_weights’ + bw_bias) 2644 * 2645 * Where: 2646 * * “{fw,bw}_input_weights” is a weight matrix that multiplies the inputs; 2647 * * “{fw,bw}_recurrent_weights” is a weight matrix that multiplies the 2648 * current “state” which itself is the output from the previous time step 2649 * computation; 2650 * * “{fw,bw}_bias” is a bias vector (added to each output vector in the 2651 * batch); 2652 * * “activation” is the function passed as the “fused_activation_function” 2653 * argument (if not “NONE”). 2654 * 2655 * The op also supports an auxiliary input. Regular cell feeds one input 2656 * into the two RNN cells in the following way: 2657 * 2658 * INPUT (INPUT_REVERSED) 2659 * | | 2660 * --------------------- 2661 * | FW_RNN BW_RNN | 2662 * --------------------- 2663 * | | 2664 * FW_OUT BW_OUT 2665 * 2666 * An op with an auxiliary input takes two inputs and feeds them into the 2667 * RNN cells in the following way: 2668 * 2669 * AUX_INPUT (AUX_INPUT_REVERSED) 2670 * | | 2671 * INPUT | (INPUT_R'D.)| 2672 * | | | | 2673 * ----------------------- 2674 * | \ / \ / | 2675 * | FW_RNN BW_RNN | 2676 * ----------------------- 2677 * | | 2678 * FW_OUT BW_OUT 2679 * 2680 * While stacking this op on top of itself, this allows to connect both 2681 * forward and backward outputs from previous cell to the next cell's 2682 * inputs. 2683 * 2684 * Supported tensor {@link OperandCode}: 2685 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 2686 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2687 * 2688 * The input tensors must all be the same type. 2689 * 2690 * Inputs: 2691 * * 0: input. 2692 * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If 2693 * it is set to true, then the input has a shape [maxTime, batchSize, 2694 * inputSize], otherwise the input has a shape [batchSize, maxTime, 2695 * inputSize]. 2696 * * 1: fwWeights. 2697 * A 2-D tensor of shape [fwNumUnits, inputSize]. 2698 * * 2: fwRecurrentWeights. 2699 * A 2-D tensor of shape [fwNumUnits, fwNumUnits]. 2700 * * 3: fwBias. 2701 * A 1-D tensor of shape [fwNumUnits]. 2702 * * 4: fwHiddenState. 2703 * A 2-D tensor of shape [batchSize, fwNumUnits]. Specifies a hidden 2704 * state input for the first time step of the computation. 2705 * * 5: bwWeights. 2706 * A 2-D tensor of shape [bwNumUnits, inputSize]. 2707 * * 6: bwRecurrentWeights. 2708 * A 2-D tensor of shape [bwNumUnits, bwNumUnits]. 2709 * * 7: bwBias. 2710 * A 1-D tensor of shape [bwNumUnits]. 2711 * * 8: bwHiddenState 2712 * A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden 2713 * state input for the first time step of the computation. 2714 * * 9: auxInput. 2715 * A 3-D tensor. The shape is the same as of the input 0. 2716 * * 10:fwAuxWeights. 2717 * A 2-D tensor of shape [fwNumUnits, inputSize]. 2718 * * 11:bwAuxWeights. 2719 * A 2-D tensor of shape [bwNumUnits, inputSize]. 2720 * * 12:fusedActivationFunction. 2721 * A {@link FuseCode} value indicating the activation function. If 2722 * “NONE” is specified then it results in a linear activation. 2723 * * 13:timeMajor 2724 * An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format 2725 * of input and output tensors. 2726 * * 14:mergeOutputs 2727 * An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs 2728 * from forward and backward cells are separate (if set to false) or 2729 * concatenated (if set to true). 2730 * Outputs: 2731 * * 0: fwOutput. 2732 * A 3-D tensor. The first two dimensions of the shape are defined by 2733 * the input 6 (timeMajor) and the third dimension is defined by the 2734 * input 14 (mergeOutputs). If timeMajor is set to true, then the first 2735 * two dimensions are [maxTime, batchSize], otherwise they are set to 2736 * [batchSize, maxTime]. If mergeOutputs is set to true, then the third 2737 * dimension is equal to (fwNumUnits + bwNumUnits), otherwise it is set 2738 * to fwNumUnits. 2739 * * 1: bwOutput. 2740 * A 3-D tensor. If the input 14 (mergeOutputs) is set to true, then 2741 * this tensor is not produced. The shape is defined by the input 6 2742 * (timeMajor). If it is set to true, then the shape is set to 2743 * [maxTime, batchSize, bwNumUnits], otherwise the shape is set to 2744 * [batchSize, maxTime, bwNumUnits]. 2745 * 2746 * Available since API level 29. 2747 */ 2748 ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN = 43, 2749 2750 /** 2751 * Greedily selects a subset of bounding boxes in descending order of score. 2752 * 2753 * This op applies NMS algorithm to each class. In each loop of execution, 2754 * the box with maximum score gets selected and removed from the pending set. 2755 * The scores of the rest of boxes are lowered according to the 2756 * intersection-over-union (IOU) overlapping with the previously selected 2757 * boxes and a specified NMS kernel method. Any boxes with score less 2758 * than a threshold are removed from the pending set. 2759 * 2760 * Three NMS kernels are supported: 2761 * * Hard: score_new = score_old * (1 if IoU < threshold else 0) 2762 * * Linear: score_new = score_old * (1 if IoU < threshold else 1 - IoU) 2763 * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma) 2764 * 2765 * Axis-aligned bounding boxes are represented by its upper-left corner 2766 * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid 2767 * bounding box should satisfy x1 <= x2 and y1 <= y2. 2768 * 2769 * Supported tensor {@link OperandCode}: 2770 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 2771 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2772 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 2773 * 2774 * Inputs: 2775 * * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score 2776 * of each bounding box proposal. The boxes are grouped by batches in the 2777 * first dimension. Zero num_rois is supported for this tensor. 2778 * * 1: A 2-D Tensor specifying the bounding boxes of shape 2779 * [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2]. 2780 * The boxes are grouped by batches in the first dimension. The sequential 2781 * order of the boxes corresponds with input0. For input0 of type 2782 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should be of 2783 * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and 2784 * scale of 0.125. Zero num_rois is supported for this tensor. 2785 * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape 2786 * [num_rois], specifying the batch index of each box. Boxes with 2787 * the same batch index are grouped together. 2788 * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, score_threshold. Boxes 2789 * with scores lower than the threshold are filtered before sending 2790 * to the NMS algorithm. 2791 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum 2792 * number of selected bounding boxes for each image. Set to a negative 2793 * value for unlimited number of output bounding boxes. 2794 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the NMS 2795 * kernel method, options are 0:hard, 1:linear, 2:gaussian. 2796 * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU 2797 * threshold in hard and linear NMS kernel. This field is ignored if 2798 * gaussian kernel is selected. 2799 * * 7: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the sigma in 2800 * gaussian NMS kernel. This field is ignored if gaussian kernel is 2801 * not selected. 2802 * * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, nms_score_threshold. 2803 * Boxes with scores lower than the threshold are dropped during the 2804 * score updating phase in soft NMS. 2805 * 2806 * Outputs: 2807 * * 0: A 1-D Tensor of the same {@link OperandCode} as input0, with shape 2808 * [num_output_rois], specifying the score of each output box. The boxes 2809 * are grouped by batches, but the sequential order in each batch is not 2810 * guaranteed. For type of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, 2811 * the scale and zero point must be the same as input0. 2812 * * 1: A 2-D Tensor of the same {@link OperandCode} as input1, with shape 2813 * [num_output_rois, 4], specifying the coordinates of each 2814 * output bounding box with the same format as input1. The sequential 2815 * order of the boxes corresponds with output0. For type of 2816 * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the scale must be 2817 * 0.125 and the zero point must be 0. 2818 * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape 2819 * [num_output_rois], specifying the class of each output box. The 2820 * sequential order of the boxes corresponds with output0. 2821 * * 3: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape 2822 * [num_output_rois], specifying the batch index of each box. Boxes 2823 * with the same batch index are grouped together. 2824 * 2825 * Available since API level 29. 2826 */ 2827 ANEURALNETWORKS_BOX_WITH_NMS_LIMIT = 44, 2828 2829 /** 2830 * Casts a tensor to a new type. 2831 * 2832 * This operation ignores the scale and zeroPoint of quanized tensors, 2833 * e.g. it treats a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} input 2834 * as a tensor of uint8 values. 2835 * 2836 * Supported tensor {@link OperandCode}: 2837 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 2838 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2839 * * {@link ANEURALNETWORKS_TENSOR_INT32} 2840 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 2841 * 2842 * Supported tensor rank: from 1 2843 * 2844 * Inputs: 2845 * * 0: A tensor. 2846 * 2847 * Outputs: 2848 * * 0: A tensor with the same shape as input0. 2849 * 2850 * Available since API level 29. 2851 */ 2852 ANEURALNETWORKS_CAST = 45, 2853 2854 /** 2855 * Shuffle the channels of the input tensor. 2856 * 2857 * Given an input tensor and a integer value of num_groups, CHANNEL_SHUFFLE 2858 * divide the channel dimension into num_groups groups, and reorganize the 2859 * channels by grouping channels with the same index in each group. 2860 * 2861 * Along the channel dimension, the output is calculated using this formula: 2862 * 2863 * output_channel[k * num_groups + g] = input_channel[g * group_size + k] 2864 * 2865 * where group_size = num_channels / num_groups 2866 * 2867 * The number of channels must be divisible by num_groups. 2868 * 2869 * Supported tensor {@link OperandCode}: 2870 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 2871 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2872 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 2873 * 2874 * Supported tensor rank: up to 4 2875 * 2876 * Inputs: 2877 * * 0: An n-D tensor, specifying the tensor to be shuffled. 2878 * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of 2879 * groups. 2880 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the dimension 2881 * channel shuffle would be performed on. Negative index is used to 2882 * specify axis from the end (e.g. -1 for the last axis). Must be in 2883 * the range [-n, n). 2884 * 2885 * Outputs: 2886 * * 0: A tensor of the same {@link OperandCode} and same shape as input0. 2887 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 2888 * the scale and zeroPoint must be the same as input0. 2889 * 2890 * Available since API level 29. 2891 */ 2892 ANEURALNETWORKS_CHANNEL_SHUFFLE = 46, 2893 2894 /** 2895 * Apply postprocessing steps to bounding box detections. 2896 * 2897 * Bounding box detections are generated by applying transformation on a set 2898 * of predefined anchors with the bounding box deltas from bounding box 2899 * regression. A final step of hard NMS is applied to limit the number of 2900 * returned boxes. 2901 * 2902 * Supported tensor {@link OperandCode}: 2903 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 2904 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2905 * 2906 * Inputs: 2907 * * 0: A 3-D Tensor of shape [batches, num_anchors, num_classes], specifying 2908 * the score of each anchor with each class. Class 0 for each 2909 * [batches, num_anchors, 0] is background and will be ignored. 2910 * * 1: A 3-D Tensor of shape [batches, num_anchors, length_box_encoding], with 2911 * the first four values in length_box_encoding specifying the bounding 2912 * box deltas. The box deltas are encoded in the order of [dy, dx, dh, dw], 2913 * where dy and dx is the linear-scale relative correction factor for the 2914 * center position of the bounding box with respect to the width and height, 2915 * dh and dw is the log-scale relative correction factor for the width and 2916 * height. All the entries in length_box_encoding beyond the first four 2917 * values are ignored in this operation. 2918 * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each 2919 * predefined anchor, with format [ctr_y, ctr_x, h, w], where ctr_y and 2920 * ctr_x are the center position of the box, and h and w are the height 2921 * and the width. 2922 * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling 2923 * factor for dy in bounding box deltas. 2924 * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling 2925 * factor for dx in bounding box deltas. 2926 * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling 2927 * factor for dh in bounding box deltas. 2928 * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling 2929 * factor for dw in bounding box deltas. 2930 * * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to use regular 2931 * multi-class NMS algorithm that do NMS separately for each class, 2932 * set to false for a faster algorithm that only do one single NMS 2933 * using the highest class score.. 2934 * * 8: An {@link ANEURALNETWORKS_INT32} scalar, max_num_detections, specifying 2935 * the maximum number of boxes for the output. Boxes with the lowest 2936 * scores are discarded to meet the limit. 2937 * * 9: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is 2938 * set to false, specifying the maximum number of classes per detection. 2939 * * 10: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is 2940 * set to true, specifying the maximum number of detections when 2941 * applying NMS algorithm for each single class. 2942 * * 11: A scalar, score_threshold. Boxes with scores lower than the 2943 * threshold are filtered before sending to the NMS algorithm. The 2944 * scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of 2945 * {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of {@link 2946 * ANEURALNETWORKS_FLOAT32} if input0 is of {@link 2947 * ANEURALNETWORKS_TENSOR_FLOAT32}. 2948 * * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar 2949 * must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of {@link 2950 * ANEURALNETWORKS_TENSOR_FLOAT16} and of {@link 2951 * ANEURALNETWORKS_FLOAT32} if input0 is of {@link 2952 * ANEURALNETWORKS_TENSOR_FLOAT32}. 2953 * * 13: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to include 2954 * background class in the list of label map for the output, set 2955 * to false to not include the background. When the background 2956 * class is included, it has label 0 and the output classes start 2957 * at 1 in the label map, otherwise, the output classes start at 0. 2958 * 2959 * Outputs: 2960 * * 0: A 2-D tensor of the same {@link OperandCode} as input0, with shape 2961 * [batches, max_num_detections], specifying the score of each output 2962 * detections. 2963 * * 1: A 3-D tensor of shape [batches, max_num_detections, 4], specifying the 2964 * coordinates of each output bounding box, with format 2965 * [y1, x1, y2, x2]. 2966 * * 2: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape 2967 * [batches, max_num_detections], specifying the class label for each 2968 * output detection. 2969 * * 3: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape [batches], 2970 * specifying the number of valid output detections for each batch. 2971 * 2972 * Available since API level 29. 2973 */ 2974 ANEURALNETWORKS_DETECTION_POSTPROCESSING = 47, 2975 2976 /** 2977 * For input tensors x and y, computes x == y elementwise. 2978 * 2979 * Supported tensor {@link OperandCode}: 2980 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 2981 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 2982 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 2983 * * {@link ANEURALNETWORKS_TENSOR_INT32} 2984 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 2985 * 2986 * Supported tensor rank: from 1 2987 * 2988 * This operation supports broadcasting. 2989 * 2990 * Inputs: 2991 * * 0: A tensor. 2992 * * 1: A tensor of the same {@link OperandCode} and dimensions compatible 2993 * with input0. 2994 * 2995 * Outputs: 2996 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. 2997 * 2998 * Available since API level 29. 2999 */ 3000 ANEURALNETWORKS_EQUAL = 48, 3001 3002 /** 3003 * Computes exponential of x element-wise. 3004 * 3005 * Supported tensor {@link OperandCode}: 3006 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3007 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3008 * 3009 * Supported tensor rank: from 1. 3010 * 3011 * Inputs: 3012 * * 0: A tensor. 3013 * 3014 * Outputs: 3015 * * 0: The output tensor of same shape as input0. 3016 * 3017 * Available since API level 29. 3018 */ 3019 ANEURALNETWORKS_EXP = 49, 3020 3021 /** 3022 * Inserts a dimension of 1 into a tensor's shape. 3023 * 3024 * Given a tensor input, this operation inserts a dimension of 1 at the 3025 * given dimension index of input's shape. The dimension index starts at 3026 * zero; if you specify a negative dimension index, it is counted backward 3027 * from the end. 3028 * 3029 * Supported tensor {@link OperandCode}: 3030 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3031 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3032 * * {@link ANEURALNETWORKS_TENSOR_INT32} 3033 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3034 * 3035 * Supported tensor rank: from 1 3036 * 3037 * Inputs: 3038 * * 0: An n-D tensor. 3039 * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the dimension 3040 * index to expand. Must be in the range [-(n + 1), (n + 1)). 3041 * 3042 * Outputs: 3043 * * 0: An (n + 1)-D tensor with the same {@link OperandCode} and data as 3044 * input0. 3045 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 3046 * the scale and zeroPoint must be the same as input0. 3047 * 3048 * Available since API level 29. 3049 */ 3050 ANEURALNETWORKS_EXPAND_DIMS = 50, 3051 3052 /** 3053 * Gathers values along an axis. 3054 * 3055 * Produces an output tensor with shape 3056 * input0.dimension[:axis] + indices.dimension + input0.dimension[axis + 1:] 3057 * where: 3058 * # Vector indices (output is rank(input0)). 3059 * output[a_0, ..., a_n, i, b_0, ..., b_n] = 3060 * input0[a_0, ..., a_n, indices[i], b_0, ..., b_n] 3061 * 3062 * # Higher rank indices (output is rank(input0) + rank(indices) - 1). 3063 * output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = 3064 * input0[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] 3065 * 3066 * Supported tensor {@link OperandCode}: 3067 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3068 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3069 * * {@link ANEURALNETWORKS_TENSOR_INT32} 3070 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3071 * 3072 * Supported tensor rank: from 1 3073 * 3074 * Inputs: 3075 * * 0: An n-D tensor from which to gather values. 3076 * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis. 3077 * Negative index is used to specify axis from the end 3078 * (e.g. -1 for the last axis). Must be in the range [-n, n). 3079 * * 2: A k-D tensor {@link ANEURALNETWORKS_TENSOR_INT32} of indices. 3080 * The values must be in the bounds of the corresponding dimensions 3081 * of input0. 3082 * 3083 * Outputs: 3084 * * 0: An (n + k - 1)-D tensor with the same {@link OperandCode} as input0. 3085 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 3086 * the scale and zeroPoint must be the same as input0. 3087 * 3088 * Available since API level 29. 3089 */ 3090 ANEURALNETWORKS_GATHER = 51, 3091 3092 /** 3093 * Generate aixs-aligned bounding box proposals. 3094 * 3095 * Bounding box proposals are generated by applying transformation on a set 3096 * of predefined anchors with the bounding box deltas from bounding box 3097 * regression. A final step of hard NMS is applied to limit the number of 3098 * returned boxes. 3099 * 3100 * Axis-aligned bounding boxes are represented by its upper-left corner 3101 * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid 3102 * bounding box should satisfy x1 <= x2 and y1 <= y2. 3103 * 3104 * Supported tensor {@link OperandCode}: 3105 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3106 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3107 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3108 * 3109 * Inputs: 3110 * * 0: A 4-D Tensor specifying the score of each anchor at each 3111 * location. With "NHWC" data layout, the tensor shape is 3112 * [batches, height, width, num_anchors]. With "NCHW" data layout, 3113 * the tensor shape is [batches, num_anchors, height, width]. 3114 * * 1: A 4-D Tensor specifying the bounding box deltas. With "NHWC" data 3115 * layout, the tensor shape is [batches, height, width, num_anchors * 4]. 3116 * With "NCHW" data layout, the tensor shape is 3117 * [batches, num_anchors * 4, height, width]. The box deltas are encoded 3118 * in the order of [dx, dy, dw, dh], where dx and dy is the linear-scale 3119 * relative correction factor for the center position of the bounding box 3120 * with respect to the width and height, dw and dh is the log-scale 3121 * relative correction factor for the width and height. The last 3122 * dimensions is the channel dimension. 3123 * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each 3124 * predefined anchor, with format [x1, y1, x2, y2]. For input0 of type 3125 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should be of 3126 * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with scale of 0.125. 3127 * * 3: A 2-D Tensor of shape [batches, 2], specifying the size of 3128 * each image in the batch, with format [image_height, image_width]. 3129 * For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this 3130 * tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with 3131 * scale of 0.125. 3132 * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio 3133 * from the height of original image to the height of feature map. 3134 * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio 3135 * from the width of original image to the width of feature map. 3136 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum 3137 * number of boxes before going into the hard NMS algorithm. Boxes 3138 * with the lowest scores are discarded to meet the limit. Set to 3139 * a non-positive value for unlimited number. 3140 * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum 3141 * number of boxes returning from the hard NMS algorithm. Boxes 3142 * with the lowest scores are discarded to meet the limit. Set to 3143 * a non-positive value for unlimited number. 3144 * * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU 3145 * threshold for hard NMS. 3146 * * 9: An {@link ANEURALNETWORKS_FLOAT32} scalar, min_size. Boxes with 3147 * height or width lower than the absolute threshold are filtered out. 3148 * * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify 3149 * NCHW data layout for input0 and input1. Set to false for NHWC. 3150 * 3151 * Outputs: 3152 * * 0: A tensor of the same {@link OperandCode} as input0, of shape 3153 * [num_output_rois], specifying the score of each output box. 3154 * The boxes are grouped by batches, but the sequential order in 3155 * each batch is not guaranteed. For type of 3156 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the scale and zero 3157 * point must be the same as input0. 3158 * * 1: A tensor of the same {@link OperandCode} as input3, of shape 3159 * [num_output_rois, 4], specifying the coordinates of each output 3160 * bounding box for each class, with format [x1, y1, x2, y2]. 3161 * The sequential order of the boxes corresponds with output0. 3162 * For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the 3163 * scale must be 0.125 and the zero point must be 0. 3164 * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape 3165 * [num_output_rois], specifying the batch index of each box. Boxes 3166 * with the same batch index are grouped together. 3167 * 3168 * Available since API level 29. 3169 */ 3170 ANEURALNETWORKS_GENERATE_PROPOSALS = 52, 3171 3172 /** 3173 * For input tensors x and y, computes x > y elementwise. 3174 * 3175 * Supported tensor {@link OperandCode}: 3176 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 3177 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3178 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3179 * * {@link ANEURALNETWORKS_TENSOR_INT32} 3180 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3181 * 3182 * Supported tensor rank: from 1 3183 * 3184 * This operation supports broadcasting. 3185 * 3186 * Inputs: 3187 * * 0: A tensor. 3188 * * 1: A tensor of the same {@link OperandCode} and dimensions compatible 3189 * with input0. 3190 * 3191 * Outputs: 3192 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. 3193 * 3194 * Available since API level 29. 3195 */ 3196 ANEURALNETWORKS_GREATER = 53, 3197 /** 3198 * For input tensors x and y, computes x >= y elementwise. 3199 * 3200 * Supported tensor {@link OperandCode}: 3201 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 3202 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3203 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3204 * * {@link ANEURALNETWORKS_TENSOR_INT32} 3205 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3206 * 3207 * Supported tensor rank: from 1 3208 * 3209 * This operation supports broadcasting. 3210 * 3211 * Inputs: 3212 * * 0: A tensor. 3213 * * 1: A tensor of the same {@link OperandCode} and dimensions compatible 3214 * with input0. 3215 * 3216 * Outputs: 3217 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. 3218 * 3219 * Available since API level 29. 3220 */ 3221 ANEURALNETWORKS_GREATER_EQUAL = 54, 3222 3223 /** 3224 * Performs a grouped 2-D convolution operation. 3225 * 3226 * Given an input tensor of shape [batches, height, width, depth_in] and a 3227 * filter tensor of shape [depth_out, filter_height, filter_width, depth_group] 3228 * containing depth_out convolutional filters of depth depth_group, GROUPED_CONV 3229 * applies a group of different filters to each input channel group, then 3230 * concatenates the results together. 3231 * 3232 * Specifically, the input channels are divided into num_groups groups, each with 3233 * depth depth_group, i.e. depth_in = num_groups * depth_group. The convolutional 3234 * filters are also divided into num_groups groups, i.e. depth_out is divisible 3235 * by num_groups. GROUPED_CONV applies each group of filters to the corresponding 3236 * input channel group, and the result are concatenated together. 3237 * 3238 * The output dimensions are functions of the filter dimensions, stride, and 3239 * padding. 3240 * 3241 * The values in the output tensor are computed as: 3242 * 3243 * output[b, i, j, g * channel_multiplier + q] = 3244 * sum_{di, dj, dk} ( 3245 * input[b, strides[1] * i + di, strides[2] * j + dj, 3246 * g * depth_group + dk] * 3247 * filter[g * channel_multiplier + q, di, dj, dk] 3248 * ) + bias[channel] 3249 * 3250 * where channel_multiplier = depth_out / num_groups 3251 * 3252 * Supported tensor {@link OperandCode} configurations: 3253 * * 16 bit floating point: 3254 * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias. 3255 * 3256 * * 32 bit floating point: 3257 * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias. 3258 * 3259 * * Quantized: 3260 * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output. 3261 * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to 3262 * * * input.scale * filter.scale). 3263 * 3264 * * Quantized with symmetric per channel quantization for the filter: 3265 * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output. 3266 * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. 3267 * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, 3268 * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). 3269 * 3270 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 3271 * With the default data layout NHWC, the data is stored in the order of: 3272 * [batch, height, width, channels]. Alternatively, the data layout could 3273 * be NCHW, the data storage order of: [batch, channels, height, width]. 3274 * 3275 * Both explicit padding and implicit padding are supported. 3276 * 3277 * Inputs (explicit padding): 3278 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], 3279 * specifying the input, where depth_in = num_groups * depth_group. 3280 * * 1: A 4-D tensor, of shape 3281 * [depth_out, filter_height, filter_width, depth_group], specifying 3282 * the filter, where depth_out must be divisible by num_groups. For 3283 * tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} 3284 * the channel dimension (channelDim at 3285 * {@link ANeuralNetworksSymmPerChannelQuantParams}) must be set to 0. 3286 * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input 3287 * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or 3288 * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same 3289 * type. For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, 3290 * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint 3291 * of 0 and bias_scale == input_scale * filter_scale. For filter tensor 3292 * of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias 3293 * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 3294 * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to 3295 * bias_scale[i] = input_scale * filter_scale[i]. 3296 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 3297 * the left, in the ‘width’ dimension. 3298 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 3299 * the right, in the ‘width’ dimension. 3300 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 3301 * the top, in the ‘height’ dimension. 3302 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 3303 * the bottom, in the ‘height’ dimension. 3304 * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 3305 * walking through input in the ‘width’ dimension. 3306 * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 3307 * walking through input in the ‘height’ dimension. 3308 * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of 3309 groups. 3310 * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 3311 * {@link FuseCode} values. Specifies the activation to 3312 * invoke on the result. 3313 * * 11: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify 3314 * NCHW data layout for input0 and output0. Set to false for NHWC. 3315 * 3316 * Inputs (implicit padding): 3317 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], 3318 * specifying the input, where depth_in = num_groups * depth_group. 3319 * * 1: A 4-D tensor, of shape 3320 * [depth_out, filter_height, filter_width, depth_group], specifying 3321 * the filter, where depth_out must be divisible by num_groups. For 3322 * tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} 3323 * the channel dimension (channelDim at 3324 * {@link ANeuralNetworksSymmPerChannelQuantParams}) must be set to 0. 3325 * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input 3326 * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or 3327 * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same 3328 * type. For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, 3329 * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint 3330 * of 0 and bias_scale == input_scale * filter_scale. For filter tensor 3331 * of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias 3332 * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 3333 * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to 3334 * bias_scale[i] = input_scale * filter_scale[i]. 3335 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit 3336 * padding scheme, has to be one of the 3337 * {@link PaddingCode} values. 3338 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 3339 * walking through input in the ‘width’ dimension. 3340 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 3341 * walking through input in the ‘height’ dimension. 3342 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of 3343 * groups. 3344 * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 3345 * {@link FuseCode} values. Specifies the activation to 3346 * invoke on the result. 3347 * * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify 3348 * NCHW data layout for input0 and output0. Set to false for NHWC. 3349 * 3350 * Outputs: 3351 * * 0: The output 4-D tensor, of shape 3352 * [batches, out_height, out_width, depth_out]. 3353 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 3354 * the scale and zeroPoint can be different from inputs' scale and zeroPoint. 3355 * 3356 * Available since API level 29. 3357 */ 3358 ANEURALNETWORKS_GROUPED_CONV_2D = 55, 3359 3360 /** 3361 * Localize the maximum keypoints from heatmaps. 3362 * 3363 * This operation approximates the accurate maximum keypoint scores and 3364 * indices after bicubic upscaling by using Taylor expansion up to the 3365 * quadratic term. 3366 * 3367 * The bounding box is represented by its upper-left corner coordinate 3368 * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image. 3369 * A valid bounding box should satisfy x1 <= x2 and y1 <= y2. 3370 * 3371 * Supported tensor {@link OperandCode}: 3372 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3373 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3374 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3375 * 3376 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 3377 * With the default data layout NHWC, the data is stored in the order of: 3378 * [batch, height, width, channels]. Alternatively, the data layout could 3379 * be NCHW, the data storage order of: [batch, channels, height, width]. 3380 * 3381 * Inputs: 3382 * * 0: A 4-D Tensor of shape 3383 * [num_boxes, heatmap_size, heatmap_size, num_keypoints], 3384 * specifying the heatmaps, the height and width of heatmaps should 3385 * be the same, and must be greater than or equal to 2. 3386 * * 1: A 2-D Tensor of shape [num_boxes, 4], specifying the bounding boxes, 3387 * each with format [x1, y1, x2, y2]. For input0 of type 3388 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should 3389 * be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint 3390 * of 0 and scale of 0.125. 3391 * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify 3392 * NCHW data layout for input0. Set to false for NHWC. 3393 * 3394 * Outputs: 3395 * * 0: A tensor of the same {@link OperandCode} as input0, with shape 3396 * [num_boxes, num_keypoints], specifying score of the keypoints. 3397 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 3398 * the scale and zeroPoint can be different from input0 scale and zeroPoint. 3399 * * 1: A tensor of the same {@link OperandCode} as input1, with shape 3400 * [num_boxes, num_keypoints, 2], specifying the location of 3401 * the keypoints, the second dimension is organized as 3402 * [keypoint_x, keypoint_y]. 3403 * For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the 3404 * scale must be 0.125 and the zero point must be 0. 3405 * 3406 * Available since API level 29. 3407 */ 3408 ANEURALNETWORKS_HEATMAP_MAX_KEYPOINT = 56, 3409 3410 /** 3411 * Applies instance normalization to the input tensor. 3412 * 3413 * The values in the output tensor are computed as: 3414 * 3415 * output[b, h, w, c] = 3416 * (input[b, h, w, c] - mean[b, c]) * gamma / 3417 * sqrt(var[b, c] + epsilon) + beta 3418 * 3419 * Where the mean and variance are computed across the spatial dimensions: 3420 * 3421 * mean[b, c] = 3422 * sum_{h, w}(input[b, h, w, c]) / sum(1) 3423 * 3424 * var[b, c] = 3425 * sum_{h, w}(pow(input[b, h, w, c] - mean[b, c], 2)) / sum(1) 3426 * 3427 * Supported tensor {@link OperandCode}: 3428 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3429 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3430 * 3431 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 3432 * With the default data layout NHWC, the data is stored in the order of: 3433 * [batch, height, width, channels]. Alternatively, the data layout could 3434 * be NCHW, the data storage order of: [batch, channels, height, width]. 3435 * 3436 * Inputs: 3437 * * 0: An n-D tensor, specifying the tensor to be normalized. 3438 * * 1: A scalar, specifying gamma, the scale applied to the normalized 3439 * tensor. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if 3440 * input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of {@link 3441 * ANEURALNETWORKS_FLOAT32} if input0 is of {@link 3442 * ANEURALNETWORKS_TENSOR_FLOAT32}. 3443 * * 2: A scalar, specifying beta, the offset applied to the normalized 3444 * tensor. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if 3445 * input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of {@link 3446 * ANEURALNETWORKS_FLOAT32} if input0 is of {@link 3447 * ANEURALNETWORKS_TENSOR_FLOAT32}. 3448 * * 3: A scalar, specifying epsilon, the small value added to variance to 3449 * avoid dividing by zero. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if 3450 * input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of {@link 3451 * ANEURALNETWORKS_FLOAT32} if input0 is of {@link 3452 * ANEURALNETWORKS_TENSOR_FLOAT32}. 3453 * * 4: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify 3454 * NCHW data layout for input0 and output0. Set to false for NHWC. 3455 * 3456 * Outputs: 3457 * * 0: A tensor of the same {@link OperandCode} and same shape as input0. 3458 * 3459 * Available since API level 29. 3460 */ 3461 ANEURALNETWORKS_INSTANCE_NORMALIZATION = 57, 3462 3463 /** 3464 * For input tensors x and y, computes x < y elementwise. 3465 * 3466 * Supported tensor {@link OperandCode}: 3467 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 3468 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3469 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3470 * * {@link ANEURALNETWORKS_TENSOR_INT32} 3471 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3472 * 3473 * Supported tensor rank: from 1 3474 * 3475 * This operation supports broadcasting. 3476 * 3477 * Inputs: 3478 * * 0: A tensor. 3479 * * 1: A tensor of the same {@link OperandCode} and dimensions compatible 3480 * with input0. 3481 * 3482 * Outputs: 3483 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. 3484 * 3485 * Available since API level 29. 3486 */ 3487 ANEURALNETWORKS_LESS = 58, 3488 3489 /** 3490 * For input tensors x and y, computes x <= y elementwise. 3491 * 3492 * Supported tensor {@link OperandCode}: 3493 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 3494 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3495 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3496 * * {@link ANEURALNETWORKS_TENSOR_INT32} 3497 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3498 * 3499 * Supported tensor rank: from 1 3500 * 3501 * This operation supports broadcasting. 3502 * 3503 * Inputs: 3504 * * 0: A tensor. 3505 * * 1: A tensor of the same {@link OperandCode} and dimensions compatible 3506 * with input0. 3507 * 3508 * Outputs: 3509 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. 3510 * 3511 * Available since API level 29. 3512 */ 3513 ANEURALNETWORKS_LESS_EQUAL = 59, 3514 3515 /** 3516 * Computes natural logarithm of x element-wise. 3517 * 3518 * Supported tensor {@link OperandCode}: 3519 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3520 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3521 * 3522 * Supported tensor rank: from 1. 3523 * 3524 * Inputs: 3525 * * 0: A tensor. 3526 * 3527 * Outputs: 3528 * * 0: The output tensor of same shape as input0. 3529 * 3530 * Available since API level 29. 3531 */ 3532 ANEURALNETWORKS_LOG = 60, 3533 3534 /** 3535 * Returns the truth value of x AND y element-wise. 3536 * 3537 * Supported tensor {@link OperandCode}: 3538 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 3539 * 3540 * Supported tensor rank: from 1 3541 * 3542 * This operation supports broadcasting. 3543 * 3544 * Inputs: 3545 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. 3546 * * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions 3547 * compatible with input0. 3548 * 3549 * Outputs: 3550 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. 3551 * 3552 * Available since API level 29. 3553 */ 3554 ANEURALNETWORKS_LOGICAL_AND = 61, 3555 3556 /** 3557 * Computes the truth value of NOT x element-wise. 3558 * 3559 * Supported tensor {@link OperandCode}: 3560 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 3561 * 3562 * Supported tensor rank: from 1. 3563 * 3564 * Inputs: 3565 * * 0: A tensor. 3566 * 3567 * Outputs: 3568 * * 0: The output tensor of same shape as input0. 3569 * 3570 * Available since API level 29. 3571 */ 3572 ANEURALNETWORKS_LOGICAL_NOT = 62, 3573 3574 /** 3575 * Returns the truth value of x OR y element-wise. 3576 * 3577 * Supported tensor {@link OperandCode}: 3578 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 3579 * 3580 * Supported tensor rank: from 1 3581 * 3582 * This operation supports broadcasting. 3583 * 3584 * Inputs: 3585 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. 3586 * * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions 3587 * compatible with input0. 3588 * 3589 * Outputs: 3590 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. 3591 * 3592 * Available since API level 29. 3593 */ 3594 ANEURALNETWORKS_LOGICAL_OR = 63, 3595 3596 /** 3597 * Computes the log softmax activations given logits. 3598 * 3599 * The output is calculated using this formula: 3600 * 3601 * output = logits * beta - log(reduce_sum(exp(logits * beta), axis)) 3602 * 3603 * Supported tensor {@link OperandCode}: 3604 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3605 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3606 * 3607 * Supported tensor rank: from 1. 3608 * 3609 * Inputs: 3610 * * 0: A tensor specifying the input logits. 3611 * * 1: A scalar, specifying the positive scaling factor for the exponent, 3612 * beta. 3613 * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta 3614 * value must be of {@link ANEURALNETWORKS_FLOAT16}. 3615 * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta 3616 * value must be of {@link ANEURALNETWORKS_FLOAT32}. 3617 * * 2: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to 3618 * reduce across. Negative index is used to specify axis from the 3619 * end (e.g. -1 for the last axis). Must be in the range [-n, n). 3620 * 3621 * Outputs: 3622 * * 0: The output tensor of the same {@link OperandCode} and shape as 3623 * input0. 3624 * 3625 * Available since API level 29. 3626 */ 3627 ANEURALNETWORKS_LOG_SOFTMAX = 64, 3628 3629 /** 3630 * Returns the element-wise maximum of two tensors. 3631 * 3632 * Supported tensor {@link OperandCode}: 3633 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3634 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3635 * * {@link ANEURALNETWORKS_TENSOR_INT32} 3636 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3637 * 3638 * Supported tensor rank: from 1. 3639 * 3640 * Inputs: 3641 * * 0: A tensor. 3642 * * 1: A tensor of the same {@link OperandCode} and compatible dimensions 3643 * with input0. 3644 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 3645 * the scales and zeroPoint can be different from input0 scale and zeroPoint. 3646 * 3647 * Outputs: 3648 * * 0: A tensor of the same {@link OperandCode} as input0. 3649 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 3650 * the scale and zeroPoint can be different from inputs' scale and zeroPoint. 3651 * 3652 * Available since API level 29. 3653 */ 3654 ANEURALNETWORKS_MAXIMUM = 65, 3655 3656 /** 3657 * Returns the element-wise minimum of two tensors. 3658 * 3659 * Supported tensor {@link OperandCode}: 3660 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3661 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3662 * * {@link ANEURALNETWORKS_TENSOR_INT32} 3663 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3664 * 3665 * Supported tensor rank: from 1. 3666 * 3667 * Inputs: 3668 * * 0: A tensor. 3669 * * 1: A tensor of the same {@link OperandCode} and compatible dimensions 3670 * with input0. 3671 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 3672 * the scales and zeroPoint can be different from input0 scale and zeroPoint. 3673 * 3674 * Outputs: 3675 * * 0: A tensor of the same {@link OperandCode} as input0. 3676 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 3677 * the scale and zeroPoint can be different from inputs' scale and zeroPoint. 3678 * 3679 * Available since API level 29. 3680 */ 3681 ANEURALNETWORKS_MINIMUM = 66, 3682 3683 /** 3684 * Computes numerical negative value element-wise. 3685 * 3686 * Supported tensor {@link OperandCode}: 3687 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3688 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3689 * * {@link ANEURALNETWORKS_TENSOR_INT32} 3690 * 3691 * Supported tensor rank: from 1. 3692 * 3693 * Inputs: 3694 * * 0: A tensor. 3695 * 3696 * Outputs: 3697 * * 0: The output tensor of same shape as input0. 3698 * 3699 * Available since API level 29. 3700 */ 3701 ANEURALNETWORKS_NEG = 67, 3702 3703 /** 3704 * For input tensors x and y, computes x != y elementwise. 3705 * 3706 * Supported tensor {@link OperandCode}: 3707 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 3708 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3709 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3710 * * {@link ANEURALNETWORKS_TENSOR_INT32} 3711 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3712 * 3713 * Supported tensor rank: from 1 3714 * 3715 * This operation supports broadcasting. 3716 * 3717 * Inputs: 3718 * * 0: A tensor. 3719 * * 1: A tensor of the same {@link OperandCode} and dimensions compatible 3720 * with input0. 3721 * 3722 * Outputs: 3723 * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}. 3724 * 3725 * Available since API level 29. 3726 */ 3727 ANEURALNETWORKS_NOT_EQUAL = 68, 3728 3729 /** 3730 * Pads a tensor with the given constant value according to the specified 3731 * paddings. 3732 * 3733 * Supported tensor {@link OperandCode}: 3734 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3735 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3736 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3737 * 3738 * Supported tensor rank: up to 4 3739 * 3740 * Inputs: 3741 * * 0: An n-D tensor, specifying the tensor to be padded. 3742 * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings 3743 * for each spatial dimension of the input tensor. The shape of the 3744 * tensor must be {rank(input0), 2}. 3745 * padding[i, 0] specifies the number of elements to be padded in the 3746 * front of dimension i. 3747 * padding[i, 1] specifies the number of elements to be padded after 3748 * the end of dimension i. 3749 * * 2: An scalar specifying the value to use for padding input0. 3750 * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the 3751 * pad value must be of {@link ANEURALNETWORKS_FLOAT16}. 3752 * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the 3753 * pad value must be of {@link ANEURALNETWORKS_FLOAT32}. 3754 * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, 3755 * the pad value must be of {@link ANEURALNETWORKS_INT32}. The 3756 * scale and zeroPoint are assumed to be the same as in input0. 3757 * 3758 * Outputs: 3759 * * 0: A tensor of the same {@link OperandCode} as input0. The 3760 * output tensor has the same rank as input0, and each 3761 * dimension of the output tensor has the same size as the 3762 * corresponding dimension of the input tensor plus the size 3763 * of the padding: 3764 * output0.dimension[i] = 3765 * padding[i, 0] + input0.dimension[i] + padding[i, 1] 3766 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 3767 * the scale and zeroPoint must be the same as input0. 3768 * 3769 * Available since API level 29. 3770 */ 3771 ANEURALNETWORKS_PAD_V2 = 69, 3772 3773 /** 3774 * Computes the power of one value to another. 3775 * 3776 * Given a tensor base and a tensor exponent, this operation computes 3777 * base^exponent elementwise. 3778 * 3779 * This operations supports broadcasting. The size of the output is the 3780 * maximum size along each dimension of the input operands. It starts with 3781 * the trailing dimensions, and works its way forward. 3782 * 3783 * For example: 3784 * base.dimension = {4, 1, 2} 3785 * exponent.dimension = {5, 4, 3, 1} 3786 * output.dimension = {5, 4, 3, 2} 3787 * 3788 * Supported tensor {@link OperandCode}: 3789 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3790 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3791 * 3792 * Supported tensor rank: from 1 3793 * 3794 * Inputs: 3795 * * 0: A tensor specifying the base. 3796 * * 1: A tensor specifying the exponent. 3797 * 3798 * Outputs: 3799 * * 0: An output tensor. 3800 * 3801 * Available since API level 29. 3802 */ 3803 ANEURALNETWORKS_POW = 70, 3804 3805 /** 3806 * Parametric Rectified Linear Unit. 3807 * 3808 * It follows: f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha 3809 * is a learned array with the same {@link OperandCode} and compatible 3810 * dimensions as input x. 3811 * 3812 * Two dimensions are compatible when: 3813 * 1. they are equal, or 3814 * 2. one of them is 1 3815 * 3816 * The size of the output is the maximum size along each dimension of the 3817 * input operands. It starts with the trailing dimensions, and works its way 3818 * forward. 3819 * 3820 * Example: 3821 * input.dimension = {4, 1, 2} 3822 * alpha.dimension = {5, 4, 3, 1} 3823 * output.dimension = {5, 4, 3, 2} 3824 * 3825 * Supported tensor {@link OperandCode}: 3826 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3827 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3828 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3829 * 3830 * Supported tensor rank: from 1 3831 * 3832 * Inputs: 3833 * * 0: A tensor, specifying the input. 3834 * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions 3835 * as input0, specifying the alpha. 3836 * 3837 * Outputs: 3838 * * 0: A tensor of the same {@link OperandCode} as input0. 3839 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 3840 * the scale and zeroPoint can be diffent from the input0 scale and zeroPoint. 3841 * 3842 * Available since API level 29. 3843 */ 3844 ANEURALNETWORKS_PRELU = 71, 3845 3846 /** 3847 * Quantizes the input tensor. 3848 * 3849 * The formula is: 3850 * 3851 * output = max(0, min(255, round(input / scale) + zeroPoint) 3852 * 3853 * Supported tensor {@link OperandCode}: 3854 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3855 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3856 * 3857 * Supported tensor rank: from 1 3858 * 3859 * Inputs: 3860 * * 0: A tensor, may be zero-sized. 3861 * 3862 * Outputs: 3863 * * 0: The output tensor of same shape as input0, but with 3864 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}. 3865 * 3866 * Available since API level 29. 3867 */ 3868 ANEURALNETWORKS_QUANTIZE = 72, 3869 3870 /** 3871 * A version of quantized LSTM, using 16 bit quantization for internal 3872 * state. 3873 * 3874 * There is no projection layer, so cell state size is equal to the output 3875 * size. 3876 * 3877 * Inputs: 3878 * * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3879 * and shape [numBatches, inputSize] specifying the input to the LSTM 3880 * cell. Tensor is quantized with a fixed quantization range of 3881 * [-1, 127/128] (scale = 1/128, zeroPoint = 128). 3882 * * 1: The input-to-input weights. 3883 * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3884 * and shape [outputSize, inputSize] specifying input-to-input part of 3885 * weights for fully-connected layer inside the LSTM cell. 3886 * Quantization zero point and scale must be the same across all the 3887 * weights. 3888 * * 2: The input-to-forget weights. 3889 * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3890 * and shape [outputSize, inputSize] specifying input-to-forget part of 3891 * weights for fully-connected layer inside the LSTM cell. 3892 * Quantization zero point and scale must be the same across all the 3893 * weights. 3894 * * 3: The input-to-cell weights. 3895 * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3896 * and shape [outputSize, inputSize] specifying input-to-cell part of 3897 * weights for fully-connected layer inside the LSTM cell. 3898 * Quantization zero point and scale must be the same across all the 3899 * weights. 3900 * * 4: The input-to-output weights. 3901 * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3902 * and shape [outputSize, inputSize] specifying input-to-output part of 3903 * weights for fully-connected layer inside the LSTM cell. 3904 * Quantization zero point and scale must be the same across all the 3905 * weights. 3906 * * 5: The recurrent-to-input weights. 3907 * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3908 * and shape [outputSize, outputSize] specifying recurrent-to-input part 3909 * of weights for fully-connected layer inside the LSTM cell. 3910 * Quantization zero point and scale must be the same across all the 3911 * weights. 3912 * * 6: The recurrent-to-forget weights. 3913 * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3914 * and shape [outputSize, outputSize] specifying recurrent-to-forget 3915 * part of weights for fully-connected layer inside the LSTM cell. 3916 * Quantization zero point and scale must be the same across all the 3917 * weights. 3918 * * 7: The recurrent-to-cell weights. 3919 * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3920 * and shape [outputSize, outputSize] specifying recurrent-to-cell part 3921 * of weights for fully-connected layer inside the LSTM cell. 3922 * Quantization zero point and scale must be the same across all the 3923 * weights. 3924 * * 8: The recurrent-to-output weights. 3925 * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3926 * and shape [outputSize, outputSize] specifying recurrent-to-output 3927 * part of weights for fully-connected layer inside the LSTM cell. 3928 * Quantization zero point and scale must be the same across all the 3929 * weights. 3930 * * 9: The input gate bias. 3931 * A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape 3932 * [outputSize] specifying the bias for the fully-connected layer 3933 * inside the LSTM cell. Bias is quantized with scale being a product 3934 * of input and weights scales and zeroPoint equal to 0. 3935 * * 10:The forget gate bias. 3936 * A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape 3937 * [outputSize] specifying the bias for the fully-connected layer 3938 * inside the LSTM cell. Bias is quantized with scale being a product 3939 * of input and weights scales and zeroPoint equal to 0. 3940 * * 11:The cell bias. 3941 * A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape 3942 * [outputSize] specifying the bias for the fully-connected layer 3943 * inside the LSTM cell. Bias is quantized with scale being a product 3944 * of input and weights scales and zeroPoint equal to 0. 3945 * * 12:The output gate bias. 3946 * A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape 3947 * [outputSize] specifying the bias for the fully-connected layer 3948 * inside the LSTM cell. Bias is quantized with scale being a product 3949 * of input and weights scales and zeroPoint equal to 0. 3950 * * 13: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} 3951 * and shape [numBatches, outputSize] specifying the cell state from the 3952 * previous time step of the LSTM cell. It is quantized using a 3953 * quantization range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / 3954 * 32768, zeroPoint = 0). 3955 * * 14: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3956 * and shape [numBathes, outputSize] specifying the output of the LSTM 3957 * cell from previous time-step. Tensor is quantized with a fixed 3958 * quantization range of [-1, 127/128] (scale = 1/128, zeroPoint = 3959 * 128). 3960 * 3961 * 3962 * Outputs: 3963 * * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM} 3964 * and shape [numBatches, outputSize] which contains a cell state from 3965 * the current time step. Tensor is quantized using a quantization 3966 * range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / 32768, zeroPoint = 3967 * 0). 3968 * * 1: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 3969 * and shape [numBathes, outputSize] which contains the output value. 3970 * Tensor is quantized with a fixed quantization range of [-1, 127/128] 3971 * (scale = 1/128, zeroPoint = 128). 3972 */ 3973 ANEURALNETWORKS_QUANTIZED_16BIT_LSTM = 73, 3974 3975 /** 3976 * Draws samples from a multinomial distribution. 3977 * 3978 * Supported tensor {@link OperandCode}: 3979 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 3980 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 3981 * 3982 * Inputs: 3983 * * 0: A 2-D tensor with shape [batches, classes], specifying the 3984 * unnormalized log-probabilities for all classes. 3985 * * 1: A scalar {@link ANEURALNETWORKS_INT32}, specifying the number of 3986 * independent samples to draw for each row slice. 3987 * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape [2], 3988 * specifying seeds used to initialize the random distribution. 3989 * Outputs: 3990 * * 0: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape 3991 * [batches, samples], containing the drawn samples. 3992 * 3993 * Available since API level 29. 3994 */ 3995 ANEURALNETWORKS_RANDOM_MULTINOMIAL = 74, 3996 3997 /** 3998 * Reduces a tensor by computing the "logical and" of elements along given 3999 * dimensions. 4000 * 4001 * If keep_dims is true, the reduced dimensions are 4002 * retained with length 1. Otherwise, the rank of the tensor is reduced by 4003 * 1 for each entry in dimensions. 4004 * 4005 * Supported tensor {@link OperandCode}: 4006 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 4007 * 4008 * Supported tensor rank: up to 4 4009 * 4010 * Inputs: 4011 * * 0: An n-D tensor. 4012 * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions 4013 * to reduce. Dimension values must be in the range [-n, n). 4014 * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, 4015 * retains reduced dimensions with length 1. 4016 * 4017 * Outputs: 4018 * * 0: A tensor of the same {@link OperandCode} as input0. 4019 * 4020 * Available since API level 29. 4021 */ 4022 ANEURALNETWORKS_REDUCE_ALL = 75, 4023 4024 /** 4025 * Reduces a tensor by computing the "logical or" of elements along given 4026 * dimensions. 4027 * 4028 * If keep_dims is true, the reduced dimensions are 4029 * retained with length 1. Otherwise, the rank of the tensor is reduced by 4030 * 1 for each entry in dimensions. 4031 * 4032 * Supported tensor {@link OperandCode}: 4033 * * {@link ANEURALNETWORKS_TENSOR_BOOL8} 4034 * 4035 * Supported tensor rank: up to 4 4036 * 4037 * Inputs: 4038 * * 0: An n-D tensor. 4039 * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions 4040 * to reduce. Dimension values must be in the range [-n, n). 4041 * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, 4042 * retains reduced dimensions with length 1. 4043 * 4044 * Outputs: 4045 * * 0: A tensor of the same {@link OperandCode} as input0. 4046 * 4047 * Available since API level 29. 4048 */ 4049 ANEURALNETWORKS_REDUCE_ANY = 76, 4050 4051 /** 4052 * Reduces a tensor by computing the maximum of elements along given 4053 * dimensions. 4054 * 4055 * If keep_dims is true, the reduced dimensions are 4056 * retained with length 1. Otherwise, the rank of the tensor is reduced by 4057 * 1 for each entry in dimensions. 4058 * 4059 * Supported tensor {@link OperandCode}: 4060 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4061 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4062 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 4063 * 4064 * Supported tensor rank: up to 4 4065 * 4066 * Inputs: 4067 * * 0: An n-D tensor. 4068 * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions 4069 * to reduce. Dimension values must be in the range [-n, n). 4070 * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, 4071 * retains reduced dimensions with length 1. 4072 * 4073 * Outputs: 4074 * * 0: A tensor of the same {@link OperandCode} as input0. 4075 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 4076 * the scale and zeroPoint must be the same as input0. 4077 * 4078 * Available since API level 29. 4079 */ 4080 ANEURALNETWORKS_REDUCE_MAX = 77, 4081 4082 /** 4083 * Reduces a tensor by computing the minimum of elements along given 4084 * dimensions. 4085 * 4086 * If keep_dims is true, the reduced dimensions are 4087 * retained with length 1. Otherwise, the rank of the tensor is reduced by 4088 * 1 for each entry in dimensions. 4089 * 4090 * Supported tensor {@link OperandCode}: 4091 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4092 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4093 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 4094 * 4095 * Supported tensor rank: up to 4 4096 * 4097 * Inputs: 4098 * * 0: An n-D tensor. 4099 * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions 4100 * to reduce. Dimension values must be in the range [-n, n). 4101 * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, 4102 * retains reduced dimensions with length 1. 4103 * 4104 * Outputs: 4105 * * 0: A tensor of the same {@link OperandCode} as input0. 4106 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 4107 * the scale and zeroPoint must be the same as input0. 4108 * 4109 * Available since API level 29. 4110 */ 4111 ANEURALNETWORKS_REDUCE_MIN = 78, 4112 4113 /** 4114 * Reduces a tensor by multiplying elements along given dimensions. 4115 * 4116 * If keep_dims is true, the reduced dimensions are 4117 * retained with length 1. Otherwise, the rank of the tensor is reduced by 4118 * 1 for each entry in dimensions. 4119 * 4120 * Supported tensor {@link OperandCode}: 4121 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4122 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4123 * 4124 * Supported tensor rank: up to 4 4125 * 4126 * Inputs: 4127 * * 0: An n-D tensor. 4128 * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions 4129 * to reduce. Dimension values must be in the range [-n, n). 4130 * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, 4131 * retains reduced dimensions with length 1. 4132 * 4133 * Outputs: 4134 * * 0: A tensor of the same {@link OperandCode} as input0. 4135 * 4136 * Available since API level 29. 4137 */ 4138 ANEURALNETWORKS_REDUCE_PROD = 79, 4139 4140 /** 4141 * Reduces a tensor by summing elements along given dimensions. 4142 * 4143 * If keep_dims is true, the reduced dimensions are 4144 * retained with length 1. Otherwise, the rank of the tensor is reduced by 4145 * 1 for each entry in dimensions. 4146 * 4147 * Supported tensor {@link OperandCode}: 4148 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4149 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4150 * 4151 * Supported tensor rank: up to 4 4152 * 4153 * Inputs: 4154 * * 0: An n-D tensor. 4155 * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions 4156 * to reduce. Dimension values must be in the range [-n, n). 4157 * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true, 4158 * retains reduced dimensions with length 1. 4159 * 4160 * Outputs: 4161 * * 0: A tensor of the same {@link OperandCode} as input0. 4162 * 4163 * Available since API level 29. 4164 */ 4165 ANEURALNETWORKS_REDUCE_SUM = 80, 4166 4167 /** 4168 * Select and scale the feature map of each region of interest to a unified 4169 * output size by average pooling sampling points from bilinear interpolation. 4170 * 4171 * The region of interest is represented by its upper-left corner coordinate 4172 * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image. 4173 * A spatial scaling factor is applied to map into feature map coordinate. 4174 * A valid region of interest should satisfy x1 <= x2 and y1 <= y2. 4175 * 4176 * No rounding is applied in this operation. The sampling points are unified 4177 * distributed in the pooling bin and their values are calculated by bilinear 4178 * interpolation. 4179 * 4180 * Supported tensor {@link OperandCode}: 4181 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29) 4182 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4183 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 4184 * 4185 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 4186 * With the default data layout NHWC, the data is stored in the order of: 4187 * [batch, height, width, channels]. Alternatively, the data layout could 4188 * be NCHW, the data storage order of: [batch, channels, height, width]. 4189 * 4190 * Inputs: 4191 * * 0: A 4-D tensor, specifying the feature map. 4192 * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of 4193 * the regions of interest, each line with format [x1, y1, x2, y2]. 4194 * For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, 4195 * this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, 4196 * with zeroPoint of 0 and scale of 0.125. Zero num_rois is 4197 * supported for this tensor. 4198 * * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape 4199 * [num_rois], specifying the batch index of each box. Boxes with 4200 * the same batch index are grouped together. Zero num_rois is 4201 * supported for this tensor. 4202 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output 4203 * height of the output tensor. 4204 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output 4205 * width of the output tensor. 4206 * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio 4207 * from the height of original image to the height of feature map. 4208 * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio 4209 * from the width of original image to the width of feature map. 4210 * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of 4211 * sampling points in height dimension used to compute the output. 4212 * Set to 0 for adaptive value of ceil(roi_height/out_height). 4213 * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of 4214 * sampling points in width dimension used to compute the output. 4215 * Set to 0 for adaptive value of ceil(roi_width/out_width). 4216 * * 9: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify 4217 * NCHW data layout for input0 and output0. Set to false for NHWC. 4218 * 4219 * Outputs: 4220 * * 0: A tensor of the same {@link OperandCode} as input0. The output 4221 * shape is [num_rois, out_height, out_width, depth]. 4222 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 4223 * the scale and zeroPoint can be different from the input0 scale and zeroPoint. 4224 * 4225 * Available since API level 29. 4226 */ 4227 ANEURALNETWORKS_ROI_ALIGN = 81, 4228 4229 /** 4230 * Select and scale the feature map of each region of interest to a unified 4231 * output size by max-pooling. 4232 * 4233 * The region of interest is represented by its upper-left corner coordinate 4234 * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image. 4235 * A spatial scaling factor is applied to map into feature map coordinate. 4236 * A valid region of interest should satisfy x1 <= x2 and y1 <= y2. 4237 * 4238 * Rounding is applied in this operation to ensure integer boundary for 4239 * regions of interest and pooling bins. 4240 * 4241 * Supported tensor {@link OperandCode}: 4242 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4243 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4244 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 4245 * 4246 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 4247 * With the default data layout NHWC, the data is stored in the order of: 4248 * [batch, height, width, channels]. Alternatively, the data layout could 4249 * be NCHW, the data storage order of: [batch, channels, height, width]. 4250 * 4251 * Inputs: 4252 * * 0: A 4-D tensor, specifying the feature map. 4253 * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of 4254 * the regions of interest, each line with format [x1, y1, x2, y2]. 4255 * For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, 4256 * this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, 4257 * with zeroPoint of 0 and scale of 0.125. 4258 * * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape 4259 * [num_rois], specifying the batch index of each box. Boxes with 4260 * the same batch index are grouped together. 4261 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output 4262 * height of the output tensor. 4263 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output 4264 * width of the output tensor. 4265 * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio 4266 * from the height of original image to the height of feature map. 4267 * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio 4268 * from the width of original image to the width of feature map. 4269 * * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify 4270 * NCHW data layout for input0 and output0. Set to false for NHWC. 4271 * 4272 * Outputs: 4273 * * 0: A tensor of the same {@link OperandCode} as input0. The output 4274 * shape is [num_rois, out_height, out_width, depth]. 4275 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 4276 * the scale and zeroPoint must be the same as input0. 4277 * 4278 * Available since API level 29. 4279 */ 4280 ANEURALNETWORKS_ROI_POOLING = 82, 4281 4282 /** 4283 * Computes reciprocal of square root of x element-wise. 4284 * 4285 * Supported tensor {@link OperandCode}: 4286 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4287 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4288 * 4289 * Supported tensor rank: from 1. 4290 * 4291 * Inputs: 4292 * * 0: A tensor. 4293 * 4294 * Outputs: 4295 * * 0: The output tensor of same shape as input0. 4296 * 4297 * Available since API level 29. 4298 */ 4299 ANEURALNETWORKS_RSQRT = 83, 4300 4301 /** 4302 * Using a tensor of booleans c and input tensors x and y select values 4303 * elementwise from both input tensors: 4304 * 4305 * O[i] = C[i] ? x[i] : y[i]. 4306 * 4307 * Supported tensor {@link OperandCode}: 4308 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4309 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4310 * * {@link ANEURALNETWORKS_TENSOR_INT32} 4311 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 4312 * 4313 * Supported tensor rank: from 1 4314 * 4315 * Inputs: 4316 * * 0: A tensor of type {@link ANEURALNETWORKS_TENSOR_BOOL8} acting as a 4317 * mask that chooses, based on the value at each element, whether the 4318 * corresponding element in the output should be taken from input1 (if 4319 * true) or input2 (if false). 4320 * * 1: An input tensor of the same shape as input0. 4321 * * 2: An input tensor of the same shape and type as input1. 4322 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 4323 * the scales and zeroPoint can be different from input1 scale and zeroPoint. 4324 * 4325 * Outputs: 4326 * * 0: A tensor of the same type and shape as input1 and input2. 4327 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 4328 * the scale and zeroPoint can be different from inputs' scale and zeroPoint. 4329 * 4330 */ 4331 ANEURALNETWORKS_SELECT = 84, 4332 4333 /** 4334 * Computes sin of x element-wise. 4335 * 4336 * Supported tensor {@link OperandCode}: 4337 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4338 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4339 * 4340 * Supported tensor rank: from 1. 4341 * 4342 * Inputs: 4343 * * 0: A tensor. 4344 * 4345 * Outputs: 4346 * * 0: The output tensor of same shape as input0. 4347 * 4348 * Available since API level 29. 4349 */ 4350 ANEURALNETWORKS_SIN = 85, 4351 4352 /** 4353 * Extracts a slice of specified size from the input tensor starting at a 4354 * specified location. 4355 * 4356 * The starting location is specified as a 1-D tensor containing offsets 4357 * for each dimension. The size is specified as a 1-D tensor containing 4358 * either size of a slice along corresponding dimension or -1. In the latter 4359 * case, all the remaining elements in dimension are included in the slice. 4360 * 4361 * A sum of begin offset and a size of a slice must not exceed size of a 4362 * corresponding dimension. 4363 * 4364 * Supported tensor {@link OperandCode}: 4365 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4366 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4367 * * {@link ANEURALNETWORKS_TENSOR_INT32} 4368 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 4369 * 4370 * Supported tensor rank: from 1 4371 * 4372 * Inputs: 4373 * * 0: An n-D tensor to take slice from, may be zero-sized. 4374 * * 1: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying 4375 * the beginning indices of the slice in each dimension. 4376 * * 2: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying 4377 * the size of the slice in each dimension. 4378 * 4379 * Outputs: 4380 * * 0: An n-D tensor of the same type as the input containing the slice. 4381 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 4382 * its scale and zeroPoint has to be same as the input0 scale and zeroPoint. 4383 * 4384 * Available since API level 29. 4385 */ 4386 ANEURALNETWORKS_SLICE = 86, 4387 4388 /** 4389 * Splits a tensor along a given axis into num_splits subtensors. 4390 * 4391 * Supported tensor {@link OperandCode}: 4392 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4393 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4394 * * {@link ANEURALNETWORKS_TENSOR_INT32} 4395 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 4396 * 4397 * Supported tensor rank: from 1 4398 * 4399 * Inputs: 4400 * * 0: An n-D tensor to split. 4401 * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis along 4402 * which to split. 4403 * * 2: An {@link ANEURALNETWORKS_INT32} scalar indicating the number of 4404 * splits along given axis. Must evenly divide axis size. 4405 * 4406 * Outputs: 4407 * * 0 ~ (num_splits - 1): Resulting subtensors. 4408 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 4409 * the scale and zeroPoint must be the same as input0. 4410 * 4411 * Available since API level 29. 4412 */ 4413 ANEURALNETWORKS_SPLIT = 87, 4414 4415 /** 4416 * Computes square root of x element-wise. 4417 * 4418 * Supported tensor {@link OperandCode}: 4419 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4420 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4421 * 4422 * Supported tensor rank: from 1. 4423 * 4424 * Inputs: 4425 * * 0: A tensor. 4426 * 4427 * Outputs: 4428 * * 0: The output tensor of same shape as input0. 4429 * 4430 * Available since API level 29. 4431 */ 4432 ANEURALNETWORKS_SQRT = 88, 4433 4434 /** 4435 * Constructs a tensor by tiling a given tensor. 4436 * 4437 * This operation creates a new tensor by replicating `input` `multiples` 4438 * times. The output tensor's i-th dimension has `input.dims(i) * multiples[i]` 4439 * elements, and the values of `input` are replicated `multiples[i]` times 4440 * along the i-th dimension. 4441 * For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`. 4442 * 4443 * Supported tensor {@link OperandCode}: 4444 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4445 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4446 * * {@link ANEURALNETWORKS_TENSOR_INT32} 4447 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 4448 * 4449 * Supported tensor rank: from 1 4450 * 4451 * Inputs: 4452 * * 0: input, an n-D tensor specifying the input. 4453 * * 1: multiples, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. 4454 * The length of multiples must be n. 4455 * 4456 * Outputs: 4457 * * 0: A tiled tensor of the same {@link OperandCode} and rank as `input`. 4458 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 4459 * the scale and zeroPoint must be the same as input0. 4460 * 4461 * Available since API level 29. 4462 */ 4463 ANEURALNETWORKS_TILE = 89, 4464 4465 /** 4466 * Finds values and indices of the k largest entries for the last dimension. 4467 * 4468 * Resulting values in each dimensions are sorted in descending order. If 4469 * two values are equal, the one with larger index appears first. 4470 * 4471 * Supported tensor {@link OperandCode}: 4472 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4473 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4474 * * {@link ANEURALNETWORKS_TENSOR_INT32} 4475 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 4476 * 4477 * Supported tensor rank: from 1 4478 * 4479 * Inputs: 4480 * * 0: input, an n-D tensor specifying the input. 4481 * * 1: k, an {@link ANEURALNETWORKS_INT32} scalar, specifying the number of 4482 * top elements to look for along the last dimension. 4483 * 4484 * Outputs: 4485 * * 0: An n-D tensor of the same type as the input, containing the k 4486 * largest elements along each last dimensional slice. 4487 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 4488 * the scale and zeroPoint must be the same as input0. 4489 * * 1: An n-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} 4490 * containing the indices of values within the last dimension of input. 4491 * 4492 * Available since API level 29. 4493 */ 4494 ANEURALNETWORKS_TOPK_V2 = 90, 4495 4496 /** 4497 * Performs the transpose of 2-D convolution operation. 4498 * 4499 * This operation is sometimes called "deconvolution" after Deconvolutional 4500 * Networks, but is actually the transpose (gradient) of 4501 * {@link ANEURALNETWORKS_CONV_2D} rather than an actual deconvolution. 4502 * 4503 * The output dimensions are functions of the filter dimensions, stride, and 4504 * padding. 4505 * 4506 * Supported tensor {@link OperandCode} configurations: 4507 * * 16 bit floating point: 4508 * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias. 4509 * 4510 * * 32 bit floating point: 4511 * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias. 4512 * 4513 * * Quantized: 4514 * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output. 4515 * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to 4516 * * * input.scale * filter.scale). 4517 * 4518 * * Quantized with symmetric per channel quantization for the filter: 4519 * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output. 4520 * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter. 4521 * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0, 4522 * * * each value scaling is separate and equal to input.scale * filter.scales[channel]). 4523 * 4524 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 4525 * With the default data layout NHWC, the data is stored in the order of: 4526 * [batch, height, width, channels]. Alternatively, the data layout could 4527 * be NCHW, the data storage order of: [batch, channels, height, width]. 4528 * 4529 * Both explicit padding and implicit padding are supported. 4530 * 4531 * Inputs (explicit padding): 4532 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], 4533 * specifying the input. Since API level 29, zero batches is supported 4534 * for this tensor. 4535 * * 1: A 4-D tensor, of shape 4536 * [depth_out, filter_height, filter_width, depth_in], specifying the 4537 * filter. For tensor of type 4538 * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel 4539 * dimension (extraParams.channelQuant.channelDim) must be set to 0. 4540 * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input 4541 * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or 4542 * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias should be of the 4543 * same type. For input tensor of type 4544 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias should be 4545 * of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and 4546 * bias_scale == input_scale * filter_scale. For filter tensor of 4547 * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias 4548 * must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 4549 * 0 and bias_scale of 0. The actual scale of each value 'i' is equal 4550 * to bias_scale[i] = input_scale * filter_scale[i]. 4551 * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 4552 * the left, in the ‘width’ dimension. 4553 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 4554 * the right, in the ‘width’ dimension. 4555 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 4556 * the top, in the ‘height’ dimension. 4557 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on 4558 * the bottom, in the ‘height’ dimension. 4559 * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 4560 * walking through input in the ‘width’ dimension. 4561 * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 4562 * walking through input in the ‘height’ dimension. 4563 * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 4564 * {@link FuseCode} values. Specifies the activation to 4565 * invoke on the result. 4566 * * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify 4567 * NCHW data layout for input0 and output0. Set to false for NHWC. 4568 * 4569 * Inputs (implicit padding): 4570 * * 0: A 4-D tensor, of shape [batches, height, width, depth_in], 4571 * specifying the input. Since API level 29, zero batches is supported 4572 * for this tensor. 4573 * * 1: A 4-D tensor, of shape 4574 * [depth_out, filter_height, filter_width, depth_in], specifying the 4575 * filter. For tensor of type 4576 * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel 4577 * dimension (extraParams.channelQuant.channelDim) must be set to 0. 4578 * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input 4579 * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or 4580 * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias should be of the 4581 * same type. For input tensor of type 4582 * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias should be 4583 * of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and 4584 * bias_scale == input_scale * filter_scale. For filter tensor of 4585 * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias 4586 * must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 4587 * 0 and bias_scale of 0. The actual scale of each value 'i' is equal 4588 * to bias_scale[i] = input_scale * filter_scale[i]. 4589 * * 3: An {@link ANEURALNETWORKS_TENSOR_INT32} tensor, specifying the output 4590 * tensor shape. 4591 * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit 4592 * padding scheme, has to be one of the 4593 * {@link PaddingCode} values. 4594 * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 4595 * walking through input in the ‘width’ dimension. 4596 * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when 4597 * walking through input in the ‘height’ dimension. 4598 * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the 4599 * {@link FuseCode} values. Specifies the activation to 4600 * invoke on the result. 4601 * * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify 4602 * NCHW data layout for input0 and output0. Set to false for NHWC. 4603 * 4604 * Outputs: 4605 * * 0: The output 4-D tensor, of shape 4606 * [batches, out_height, out_width, depth_out]. 4607 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 4608 * the scale and zeroPoint can be different from inputs' scale and zeroPoint. 4609 * 4610 * Available since API level 29. 4611 */ 4612 ANEURALNETWORKS_TRANSPOSE_CONV_2D = 91, 4613 4614 /** 4615 * A recurrent neural network specified by an LSTM cell. 4616 * 4617 * Performs (fully) dynamic unrolling of input. 4618 * 4619 * This Op unrolls the input along the time dimension, and implements the 4620 * following operation for each element in the sequence 4621 * s = 1...sequence_length: 4622 * outputs[s] = projection(state = activation(LSTMOp(inputs[s]))) 4623 * 4624 * Where LSTMOp is the LSTM op as in {@link ANEURALNETWORKS_LSTM}, 4625 * the "projection" is an optional projection layer from state and output 4626 * and the “activation” is the function passed as the 4627 * “fused_activation_function” argument (if not “NONE”). 4628 * 4629 * Supported tensor {@link OperandCode}: 4630 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4631 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4632 * 4633 * Supported tensor rank: 3, either time-major or batch-major. 4634 * 4635 * All input and output tensors must be of the same type. 4636 * 4637 * Inputs: 4638 * * 0: The input (\f$x_t\f$). 4639 * A 3-D tensor of shape: 4640 * If time-major: [max_time, batch_size, input_size] 4641 * If batch-major: [batch_size, max_time, input_size] 4642 * where “max_time” is the number of timesteps (sequence length), 4643 * “batch_size” corresponds to the batching dimension, and 4644 * “input_size” is the size of the input. 4645 * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional. 4646 * A 2-D tensor of shape [num_units, input_size], where “num_units” 4647 * corresponds to the number of cell units. 4648 * * 2: The input-to-forget weights (\f$W_{xf}\f$). 4649 * A 2-D tensor of shape [num_units, input_size]. 4650 * * 3: The input-to-cell weights (\f$W_{xc}\f$). 4651 * A 2-D tensor of shape [num_units, input_size]. 4652 * * 4: The input-to-output weights (\f$W_{xo}\f$). 4653 * A 2-D tensor of shape [num_units, input_size]. 4654 * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional. 4655 * A 2-D tensor of shape [num_units, output_size], where “output_size” 4656 * corresponds to either the number of cell units (i.e., “num_units”), 4657 * or the second dimension of the “projection_weights”, if defined. 4658 * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$). 4659 * A 2-D tensor of shape [num_units, output_size]. 4660 * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$). 4661 * A 2-D tensor of shape [num_units, output_size]. 4662 * * 8: The recurrent-to-output weights (\f$W_{ho}\f$). 4663 * A 2-D tensor of shape [num_units, output_size]. 4664 * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional. 4665 * A 1-D tensor of shape [num_units]. 4666 * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional. 4667 * A 1-D tensor of shape [num_units]. 4668 * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional. 4669 * A 1-D tensor of shape [num_units]. 4670 * * 12:The input gate bias (\f$b_i\f$). Optional. 4671 * A 1-D tensor of shape [num_units]. 4672 * * 13:The forget gate bias (\f$b_f\f$). 4673 * A 1-D tensor of shape [num_units]. 4674 * * 14:The cell bias (\f$b_c\f$). 4675 * A 1-D tensor of shape [num_units]. 4676 * * 15:The output gate bias (\f$b_o\f$). 4677 * A 1-D tensor of shape [num_units]. 4678 * * 16:The projection weights (\f$W_{proj}\f$). Optional. 4679 * A 2-D tensor of shape [output_size, num_units]. 4680 * * 17:The projection bias (\f$b_{proj}\f$). Optional. 4681 * A 1-D tensor of shape [output_size]. 4682 * * 18:The output state (in) (\f$h_{t-1}\f$). 4683 * A 2-D tensor of shape [batch_size, output_size]. 4684 * * 19:The cell state (in) (\f$C_{t-1}\f$). 4685 * A 2-D tensor of shape [batch_size, num_units]. 4686 * * 20:The activation function (\f$g\f$). 4687 * A value indicating the activation function: 4688 * <ul> 4689 * <li>0: None; 4690 * <li>1: Relu; 4691 * <li>3: Relu6; 4692 * <li>4: Tanh; 4693 * <li>6: Sigmoid. 4694 * </ul> 4695 * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such 4696 * that values are bound within [-cell_clip, cell_clip]. If set to 0.0 4697 * then clipping is disabled. 4698 * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the 4699 * projection layer, such that values are bound within 4700 * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled. 4701 * * 23:Time-major if true, batch-major if false. 4702 * * 24:The input layer normalization weights. Optional. 4703 * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs 4704 * to activation at input gate. 4705 * * 25:The forget layer normalization weights. Optional. 4706 * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs 4707 * to activation at forget gate. 4708 * * 26:The cell layer normalization weights. Optional. 4709 * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs 4710 * to activation at cell gate. 4711 * * 27:The output layer normalization weights. Optional. 4712 * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs 4713 * to activation at output gate. 4714 * 4715 * Outputs: 4716 * * 0: The output (\f$o_t\f$). 4717 * A 3-D tensor of shape: 4718 * If time-major: [max_time, batch_size, output_size] 4719 * If batch-major: [batch_size, max_time, output_size] 4720 * 4721 * Available since API level 29. 4722 */ 4723 ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM = 92, 4724 4725 /** 4726 * A recurrent neural network layer that applies a basic RNN cell to a 4727 * sequence of inputs. 4728 * 4729 * This layer unrolls the input along the sequence dimension, and implements 4730 * the following operation 4731 * for each element in the sequence s = 1...sequence_length: 4732 * outputs[s] = state = activation(inputs[s] * input_weights’ + state * 4733 * recurrent_weights’ + bias) 4734 * 4735 * Where: 4736 * * “input_weights” is a weight matrix that multiplies the inputs; 4737 * * “recurrent_weights” is a weight matrix that multiplies the current 4738 * “state” which itself is the output from the previous time step 4739 * computation; 4740 * * “bias” is a bias vector (added to each output vector in the batch); 4741 * * “activation” is the function passed as the “fused_activation_function” 4742 * argument (if not “NONE”). 4743 * 4744 * Supported tensor {@link OperandCode}: 4745 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4746 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4747 * 4748 * The input tensors must all be the same type. 4749 * 4750 * Inputs: 4751 * * 0: input. 4752 * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If 4753 * it is set to 1, then the input has a shape [maxTime, batchSize, 4754 * inputSize], otherwise the input has a shape [batchSize, maxTime, 4755 * inputSize]. 4756 * * 1: weights. 4757 * A 2-D tensor of shape [numUnits, inputSize]. 4758 * * 2: recurrent_weights. 4759 * A 2-D tensor of shape [numUnits, numUnits]. 4760 * * 3: bias. 4761 * A 1-D tensor of shape [numUnits]. 4762 * * 4: hidden state 4763 * A 2-D tensor of shape [batchSize, numUnits]. Specifies a hidden 4764 * state input for the first time step of the computation. 4765 * * 5: fusedActivationFunction. 4766 * A {@link FuseCode} value indicating the activation function. If 4767 * “NONE” is specified then it results in a linear activation. 4768 * * 6: timeMajor 4769 * An {@link ANEURALNETWORKS_INT32} scalar specifying the shape format 4770 * of input and output tensors. Must be set to either 0 or 1. 4771 * Outputs: 4772 * * 0: output. 4773 * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If 4774 * it is set to 1, then the output has a shape [maxTime, batchSize, 4775 * numUnits], otherwise the output has a shape [batchSize, maxTime, 4776 * numUnits]. 4777 * 4778 * Available since API level 29. 4779 */ 4780 ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN = 93, 4781 4782 /** 4783 * Resizes images to given size using the nearest neighbor interpretation. 4784 * 4785 * Resized images must be distorted if their output aspect ratio is not the 4786 * same as input aspect ratio. The corner pixels of output may not be the 4787 * same as corner pixels of input. 4788 * 4789 * Supported tensor {@link OperandCode}: 4790 * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} 4791 * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} 4792 * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} 4793 * 4794 * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout. 4795 * With the default data layout NHWC, the data is stored in the order of: 4796 * [batch, height, width, channels]. Alternatively, the data layout could 4797 * be NCHW, the data storage order of: [batch, channels, height, width]. 4798 * 4799 * Both resizing by shape and resizing by scale are supported. 4800 * 4801 * Inputs (resizing by shape): 4802 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying 4803 * the input. Zero batches is supported for this tensor. 4804 * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output 4805 * width of the output tensor. 4806 * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output 4807 * height of the output tensor. 4808 * * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false. 4809 * Set to true to specify NCHW data layout for input0 and output0. 4810 * 4811 * Inputs (resizing by scale): 4812 * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying 4813 * the input. Zero batches is supported for this tensor. 4814 * * 1: A scalar, specifying width_scale, the scaling factor of the width 4815 * dimension from the input tensor to the output tensor. The output 4816 * width is calculated as new_width = floor(width * width_scale). 4817 * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is 4818 * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of 4819 * {@link ANEURALNETWORKS_FLOAT32} otherwise. 4820 * * 2: A scalar, specifying height_scale, the scaling factor of the height 4821 * dimension from the input tensor to the output tensor. The output 4822 * height is calculated as new_height = floor(height * height_scale). 4823 * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is 4824 * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of 4825 * {@link ANEURALNETWORKS_FLOAT32} otherwise. 4826 * * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false. 4827 * Set to true to specify NCHW data layout for input0 and output0. 4828 * 4829 * Outputs: 4830 * * 0: The output 4-D tensor, of shape 4831 * [batches, new_height, new_width, depth]. 4832 * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor, 4833 * the scale and zeroPoint must be the same as input0. 4834 * 4835 * Available since API level 29. 4836 */ 4837 ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR = 94, 4838 } OperationCode; 4839 4840 /** 4841 * Fused activation function types. 4842 * 4843 * 4844 * Available since API level 27. 4845 */ 4846 typedef enum { 4847 /** NO fused activation function. */ 4848 ANEURALNETWORKS_FUSED_NONE = 0, 4849 /** Fused ReLU activation function. */ 4850 ANEURALNETWORKS_FUSED_RELU = 1, 4851 /** Fused ReLU1 activation function. */ 4852 ANEURALNETWORKS_FUSED_RELU1 = 2, 4853 /** Fused ReLU6 activation function. */ 4854 ANEURALNETWORKS_FUSED_RELU6 = 3, 4855 } FuseCode; 4856 4857 /** 4858 * Implicit padding algorithms. 4859 * 4860 * 4861 * Available since API level 27. 4862 */ 4863 typedef enum { 4864 /** 4865 * SAME padding. 4866 * Padding on both ends are the "same": 4867 * padding_to_beginning = total_padding / 2 4868 * padding_to_end = (total_padding + 1)/2. 4869 * i.e., for even number of padding, padding to both ends are exactly 4870 * the same; for odd number of padding, padding to the ending is bigger 4871 * than the padding to the beginning by 1. 4872 * 4873 * total_padding is a function of input, stride and filter size. 4874 * It could be computed as follows: 4875 * out_size = (input + stride - 1) / stride; 4876 * needed_input = (out_size - 1) * stride + filter_size 4877 * total_padding = max(0, needed_input - input_size) 4878 * The computation is the same for the horizontal and vertical directions. 4879 */ 4880 ANEURALNETWORKS_PADDING_SAME = 1, 4881 4882 /** 4883 * VALID padding. 4884 * No padding. When the input size is not evenly divisible by 4885 * the filter size, the input at the end that could not fill 4886 * the whole filter tile will simply be ignored. 4887 */ 4888 ANEURALNETWORKS_PADDING_VALID = 2, 4889 } PaddingCode; 4890 4891 /** 4892 * Execution preferences. 4893 * 4894 * Available since API level 27. 4895 */ 4896 typedef enum { 4897 /** 4898 * Prefer executing in a way that minimizes battery drain. 4899 * This is desirable for compilations that will be executed often. 4900 */ 4901 ANEURALNETWORKS_PREFER_LOW_POWER = 0, 4902 /** 4903 * Prefer returning a single answer as fast as possible, even if this causes 4904 * more power consumption. 4905 */ 4906 ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1, 4907 /** 4908 * Prefer maximizing the throughput of successive frames, for example when 4909 * processing successive frames coming from the camera. 4910 */ 4911 ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2, 4912 } PreferenceCode; 4913 4914 /** 4915 * Device types. 4916 * 4917 * The type of NNAPI device. 4918 */ 4919 typedef enum { 4920 /** The device type cannot be provided. */ 4921 ANEURALNETWORKS_DEVICE_UNKNOWN = 0, 4922 /** The device does not fall into any category below. */ 4923 ANEURALNETWORKS_DEVICE_OTHER = 1, 4924 /** The device runs NNAPI models on single or multi-core CPU. */ 4925 ANEURALNETWORKS_DEVICE_CPU = 2, 4926 /** The device can run NNAPI models and also accelerate graphics APIs such 4927 * as OpenGL ES and Vulkan. */ 4928 ANEURALNETWORKS_DEVICE_GPU = 3, 4929 /** Dedicated accelerator for Machine Learning workloads. */ 4930 ANEURALNETWORKS_DEVICE_ACCELERATOR = 4, 4931 } DeviceTypeCode; 4932 4933 /** 4934 * Result codes. 4935 * 4936 * <p>Any NNAPI function can return any result code, including result codes not 4937 * currently documented. Any value other than {@link ANEURALNETWORKS_NO_ERROR} 4938 * indicates a failure of some kind.</p> 4939 * 4940 * <p>Additional information about the nature of a failure can be obtained from 4941 * the device log after enabling NNAPI debugging by setting the debug.nn.vlog 4942 * property to 1, e.g., by calling "adb shell setprop debug.nn.vlog 1".</p> 4943 * 4944 * Available since API level 27. 4945 */ 4946 typedef enum { 4947 /** 4948 * Operation was succesful. 4949 */ 4950 ANEURALNETWORKS_NO_ERROR = 0, 4951 4952 /** 4953 * Failure caused by not enough available memory. 4954 */ 4955 ANEURALNETWORKS_OUT_OF_MEMORY = 1, 4956 4957 ANEURALNETWORKS_INCOMPLETE = 2, 4958 4959 /** 4960 * Failure caused by unexpected null argument. 4961 */ 4962 ANEURALNETWORKS_UNEXPECTED_NULL = 3, 4963 4964 /** 4965 * Failure caused by invalid function arguments, invalid model definition, 4966 * invalid execution definition or invalid data at execution time. 4967 */ 4968 ANEURALNETWORKS_BAD_DATA = 4, 4969 4970 /** 4971 * Failure caused by failed model execution. 4972 */ 4973 ANEURALNETWORKS_OP_FAILED = 5, 4974 4975 /** 4976 * Failure caused by object being in the wrong state. 4977 */ 4978 ANEURALNETWORKS_BAD_STATE = 6, 4979 4980 /** 4981 * Failure caused by not being able to map a file into memory. 4982 * This may be caused by a file descriptor not being mappable, or an AHardwareBuffer 4983 * not supported by the device. 4984 * Mitigate by reading its content into memory. 4985 */ 4986 ANEURALNETWORKS_UNMAPPABLE = 7, 4987 4988 /** 4989 * Failure caused by insufficient buffer size provided to a model output. 4990 */ 4991 ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE = 8, 4992 4993 /** 4994 * Failure caused by a device not being available. 4995 */ 4996 ANEURALNETWORKS_UNAVAILABLE_DEVICE = 9, 4997 } ResultCode; 4998 4999 /** 5000 * For {@link ANeuralNetworksModel_setOperandValue}, values with a 5001 * length smaller or equal to this will be immediately copied into 5002 * the model. The size is in bytes. 5003 * 5004 * Available since API level 27. 5005 */ 5006 enum { ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES = 128 }; 5007 5008 /** 5009 * For {@link ANeuralNetworksCompilation_setCaching}, specify the size 5010 * of the cache token required from the application. The size is in bytes. 5011 * 5012 * Available since API level 29. 5013 */ 5014 enum { ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN = 32 }; 5015 5016 /** 5017 * ANeuralNetworksMemory is an opaque type that represents memory. 5018 * 5019 * This type is used to represent shared memory, memory mapped files, 5020 * and similar memories. 5021 * 5022 * By using shared memory, a program can efficiently communicate to the 5023 * runtime and drivers the tensors that define a model. See 5024 * {@link ANeuralNetworksModel_setOperandValueFromMemory}. An application 5025 * should typically create one shared memory object that contains every constant tensor 5026 * needed to define a model. {@link ANeuralNetworksMemory_createFromFd} can be used to 5027 * create shared memory from a file handle. 5028 * {@link ANeuralNetworksMemory_createFromAHardwareBuffer} can be used to 5029 * create shared memory from an AHardwareBuffer handle. 5030 * 5031 * Memory objects can also be used to specify the input and output arguments of 5032 * an execution. See {@link ANeuralNetworksExecution_setInputFromMemory} 5033 * and {@link ANeuralNetworksExecution_setOutputFromMemory}. 5034 * 5035 * When calling {@link ANeuralNetworksModel_setOperandValueFromMemory}, 5036 * {@link ANeuralNetworksExecution_setInputFromMemory} and 5037 * {@link ANeuralNetworksExecution_setOutputFromMemory}, each operand in the shared 5038 * memory object must be aligned on a boundary of a byte size that is a multiple 5039 * of the element type byte size, e.g., a tensor with 5040 * {@link ANEURALNETWORKS_TENSOR_FLOAT32} type must be aligned on 4-byte boundary. 5041 * 5042 * Available since API level 27. 5043 */ 5044 typedef struct ANeuralNetworksMemory ANeuralNetworksMemory; 5045 5046 /** 5047 * ANeuralNetworksModel is an opaque type that contains a description of the 5048 * mathematical operations that constitute the model. 5049 * 5050 * <p>Build the model by calling<ul> 5051 * <li>{@link ANeuralNetworksModel_create}</li> 5052 * <li>{@link ANeuralNetworksModel_addOperation}</li> 5053 * <li>{@link ANeuralNetworksModel_addOperand}</li> 5054 * </ul> 5055 * 5056 * This forms a graph in which each operation and operand is a node, a 5057 * directed edge from an operand to an operation indicates that the 5058 * operand is an input to the operation, and a directed edge from an 5059 * operation to an operand indicates that the operand is an output 5060 * from the operation. This graph must be acyclic. 5061 * 5062 * A model is completed by calling {@link ANeuralNetworksModel_finish}. 5063 * A model is destroyed by calling {@link ANeuralNetworksModel_free}. 5064 * 5065 * <p>A model cannot be modified once {@link ANeuralNetworksModel_finish} 5066 * has been called on it.</p> 5067 * 5068 * <p>It is the application's responsibility to make sure that only one thread 5069 * modifies a model at a given time. It is however safe for more than one 5070 * thread to use the model once {@link ANeuralNetworksModel_finish} has returned.</p> 5071 * 5072 * <p>It is also the application's responsibility to ensure that there are no other 5073 * uses of the model after calling {@link ANeuralNetworksModel_free}. 5074 * This includes any compilation or execution object created using the model.</p> 5075 * 5076 * Available since API level 27. 5077 */ 5078 typedef struct ANeuralNetworksModel ANeuralNetworksModel; 5079 5080 /** 5081 * ANeuralNetworksCompilation is an opaque type that can be used to compile 5082 * a machine learning model. 5083 * 5084 * <p>To use:<ul> 5085 * <li>Create a new compilation instance by calling the 5086 * {@link ANeuralNetworksCompilation_create} function or 5087 * {@link ANeuralNetworksCompilation_createForDevices}.</li> 5088 * <li>Set any desired properties on the compilation (for example, 5089 * {@link ANeuralNetworksCompilation_setPreference}).</li> 5090 * <li>Optionally, set the caching signature and the cache directory on the 5091 * compilation by calling {@link ANeuralNetworksCompilation_setCaching}.</li> 5092 * <li>Complete the compilation with {@link ANeuralNetworksCompilation_finish}.</li> 5093 * <li>Use the compilation as many times as needed 5094 * with {@link ANeuralNetworksExecution_create} and 5095 * {@link ANeuralNetworksBurst_create}.</li> 5096 * <li>Destroy the compilation with {@link ANeuralNetworksCompilation_free} 5097 * once all executions using the compilation have completed.</li></ul></p> 5098 * 5099 * A compilation is completed by calling {@link ANeuralNetworksCompilation_finish}. 5100 * A compilation is destroyed by calling {@link ANeuralNetworksCompilation_free}. 5101 * 5102 * <p>A compilation cannot be modified once {@link ANeuralNetworksCompilation_finish} 5103 * has been called on it.</p> 5104 * 5105 * <p>It is the application's responsibility to make sure that only 5106 * one thread modifies a compilation at a given time. It is however 5107 * safe for more than one thread to use the compilation once 5108 * {@link ANeuralNetworksCompilation_finish} has returned.</p> 5109 * 5110 * <p>It is also the application's responsibility to ensure that there are no other 5111 * uses of the compilation after calling {@link ANeuralNetworksCompilation_free}. 5112 * This includes any execution object created using the compilation.</p> 5113 * 5114 * Available since API level 27. 5115 */ 5116 typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation; 5117 5118 /** 5119 * ANeuralNetworksExecution is an opaque type that can be used to apply a machine 5120 * learning model to a set of inputs. 5121 * 5122 * <p>To use:<ul> 5123 * <li>Create a new execution instance by calling the 5124 * {@link ANeuralNetworksExecution_create} function.</li> 5125 * <li>Associate input buffers or memory regions to the model inputs with 5126 * {@link ANeuralNetworksExecution_setInput} or 5127 * {@link ANeuralNetworksExecution_setInputFromMemory}.</li> 5128 * <li>Associate output buffers or memory regions to the model outputs with 5129 * {@link ANeuralNetworksExecution_setOutput} or 5130 * {@link ANeuralNetworksExecution_setOutputFromMemory}.</li> 5131 * <li>Apply the model with one of the following:</li><ul> 5132 * <li>Asynchronously with {@link ANeuralNetworksExecution_startCompute}, 5133 * waiting for the execution to complete with 5134 * {@link ANeuralNetworksEvent_wait}.</li> 5135 * <li>Synchronously with {@link ANeuralNetworksExecution_compute}.</li> 5136 * <li>Synchronously as part of an execution burst with 5137 * {@link ANeuralNetworksExecution_burstCompute}.</li></ul> 5138 * <li>Destroy the execution with 5139 * {@link ANeuralNetworksExecution_free}.</li></ul></p> 5140 * 5141 * <p>An output buffer or memory region must not overlap with any 5142 * other output buffer or memory region, with an input buffer or 5143 * memory region, or with an operand value in a memory object 5144 * ({@link ANeuralNetworksModel_setOperandValueFromMemory}).</p> 5145 * 5146 * <p>An execution cannot be modified once 5147 * {@link ANeuralNetworksExecution_compute} or 5148 * {@link ANeuralNetworksExecution_startCompute} has been called on it.</p> 5149 * 5150 * <p>An execution can be applied to a model with 5151 * {@link ANeuralNetworksExecution_compute} or 5152 * {@link ANeuralNetworksExecution_startCompute} only once. Create new 5153 * executions to do new evaluations of the model.</p> 5154 * 5155 * <p>It is the application's responsibility to make sure that only one thread 5156 * modifies an execution at a given time. It is however safe for more than one 5157 * thread to use {@link ANeuralNetworksEvent_wait} at the same time.</p> 5158 * 5159 * <p>It is also the application's responsibility to ensure that there are no other 5160 * uses of the execution after calling {@link ANeuralNetworksExecution_free}.</p> 5161 * 5162 * <p>Multiple executions can be scheduled and evaluated concurrently, either by 5163 * means of {@link ANeuralNetworksExecution_compute} (which is synchronous) in 5164 * different threads or by means of 5165 * {@link ANeuralNetworksExecution_startCompute} (which is asynchronous). The 5166 * runtime makes no guarantee on the ordering of completion of executions. If 5167 * it's important to the application, the application should enforce the 5168 * ordering by ensuring that one execution completes before the next is 5169 * scheduled (for example, by scheduling all executions synchronously within a 5170 * single thread, or by scheduling all executions asynchronously and using 5171 * {@link ANeuralNetworksEvent_wait} between calls to 5172 * {@link ANeuralNetworksExecution_startCompute}).</p> 5173 * 5174 * Available since API level 27. 5175 */ 5176 typedef struct ANeuralNetworksExecution ANeuralNetworksExecution; 5177 5178 #if __ANDROID_API__ >= __ANDROID_API_Q__ 5179 /** 5180 * Parameters for ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL operand. 5181 */ 5182 typedef struct ANeuralNetworksSymmPerChannelQuantParams { 5183 /* The index of the channel dimension. */ 5184 uint32_t channelDim; 5185 /** The size of the scale array. Should be equal to dimension[channelDim] of the Operand. */ 5186 uint32_t scaleCount; 5187 /** The array of scaling values for each channel. Each value must be greater than zero. */ 5188 const float* scales; 5189 } ANeuralNetworksSymmPerChannelQuantParams; 5190 5191 /** 5192 * ANeuralNetworksBurst is an opaque type that can be used to reduce the latency 5193 * of a rapid sequence of executions. It will likely cause overhead if only used 5194 * for a single execution. 5195 * 5196 * ANeuralNetworksBurst serves as a context object for any number of inferences 5197 * using {@link ANeuralNetworksExecution} objects. An ANeuralNetworksBurst 5198 * object and the {@link ANeuralNetworksExecution} objects used with it must all 5199 * have been created from the same {@link ANeuralNetworksCompilation} object. 5200 * 5201 * This object is also used as a hint to drivers, providing insight to the 5202 * lifetime of a rapid sequence of executions. For example, a driver may choose 5203 * to increase the clock frequency of its accelerator for the lifetime of a 5204 * burst object. 5205 * 5206 * <p>To use:<ul> 5207 * <li>Create a new burst object by calling the 5208 * {@link ANeuralNetworksBurst_create} function.</li> 5209 * <li>For each execution:</li><ul> 5210 * <li>Create {@link ANeuralNetworksExecution} and configure its 5211 * properties (see {@link ANeuralNetworksExecution} for details).</li> 5212 * <li>Apply the model synchronously with 5213 * {@link ANeuralNetworksExecution_burstCompute}, reusing the same 5214 * {@link ANeuralNetworksBurst} with the new 5215 * {@link ANeuralNetworksExecution}.</li> 5216 * <li>Use and free the {@link ANeuralNetworksExecution}.</li></ul> 5217 * <li>Destroy the burst with 5218 * {@link ANeuralNetworksBurst_free}.</li></ul></p> 5219 * 5220 * Available since API level 29. 5221 */ 5222 typedef struct ANeuralNetworksBurst ANeuralNetworksBurst; 5223 #endif // __ANDROID_API__ >= __ANDROID_API_Q__ 5224 5225 /** 5226 * ANeuralNetworksOperandType describes the type of an operand. 5227 * 5228 * This structure is used to describe both scalars and tensors. 5229 * 5230 * A tensor operand type with all dimensions specified is "fully 5231 * specified". Whenever possible (i.e., whenever the dimensions are 5232 * known at model construction time), a tensor operand type should be 5233 * (but is not required to be) fully specified, in order to enable the 5234 * best possible performance. 5235 * 5236 * If a tensor operand's type is not fully specified, the dimensions 5237 * of the operand are deduced from the operand types and values of the 5238 * operation for which that operand is an output. 5239 * 5240 * <p>In the following situations, a tensor operand type must be fully 5241 * specified:<ul> 5242 * <li>The operand has a constant value, set by 5243 * {@link ANeuralNetworksModel_setOperandValue} (with a 5244 * non-nullptr buffer) or 5245 * {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li> 5246 * <li>The operand is a model input (see 5247 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}). A 5248 * fully specified tensor operand type must either be provided 5249 * to {@link ANeuralNetworksModel_addOperand}; or it must be 5250 * provided to the corresponding 5251 * {@link ANeuralNetworksExecution_setInput}, or 5252 * {@link ANeuralNetworksExecution_setInputFromMemory}. 5253 * EXCEPTION: If the input is optional and omitted 5254 * (by passing nullptr for buffer to 5255 * {@link ANeuralNetworksExecution_setInput}) then it need 5256 * not have a fully specified tensor operand type.</li></ul> 5257 * 5258 * A tensor operand type of specified rank but some number of 5259 * unspecified dimensions is represented by setting dimensionCount to 5260 * the rank and each unspecified dimension to 0. 5261 * 5262 * Available since API level 27. 5263 * 5264 * Starting at API level 29, a tensor operand type of unspecified rank is 5265 * represented by setting dimensionCount to 0 and dimensions to NULL (just as if 5266 * it were a scalar operand type). 5267 */ 5268 typedef struct ANeuralNetworksOperandType { 5269 /** 5270 * The data type, e.g ANEURALNETWORKS_FLOAT32. 5271 */ 5272 int32_t type; 5273 5274 /** 5275 * The number of dimensions (rank). 5276 * 5277 * Must be 0 for scalars. 5278 */ 5279 uint32_t dimensionCount; 5280 5281 /** 5282 * The dimensions of the tensor. 5283 * 5284 * Must be nullptr for scalars. 5285 */ 5286 const uint32_t* dimensions; 5287 5288 /** 5289 * These two fields are only used for quantized tensors. 5290 * They must be zero for all other types. 5291 * The dequantized value of each entry is (value - zeroPoint) * scale. 5292 */ 5293 float scale; 5294 int32_t zeroPoint; 5295 } ANeuralNetworksOperandType; 5296 5297 typedef int32_t ANeuralNetworksOperationType; 5298 5299 /** 5300 * ANeuralNetworksEvent is an opaque type that represents an event 5301 * that will be signaled once an execution completes. 5302 * 5303 * Available since API level 27. 5304 */ 5305 typedef struct ANeuralNetworksEvent ANeuralNetworksEvent; 5306 5307 #if __ANDROID_API__ >= __ANDROID_API_Q__ 5308 5309 /** 5310 * ANeuralNetworksDevice is an opaque type that represents a device. 5311 * 5312 * This type is used to query basic properties and supported operations of the corresponding 5313 * device, and control which device(s) a model is to be run on. 5314 * 5315 * Available since API level 29. 5316 */ 5317 typedef struct ANeuralNetworksDevice ANeuralNetworksDevice; 5318 5319 /** 5320 * Get the number of available devices. 5321 * 5322 * @param numDevices Used to return the number of devices. 5323 * 5324 * @return ANEURALNETWORKS_NO_ERROR if successful. 5325 * 5326 * Available since API level 29. 5327 */ 5328 int ANeuralNetworks_getDeviceCount(uint32_t* numDevices) __INTRODUCED_IN(29); 5329 5330 /** 5331 * Get the representation of the specified device. 5332 * 5333 * @param devIndex The index of the specified device. Must be less than the 5334 number of available devices. 5335 * @param device The representation of the specified device. 5336 * The same representation will always be returned for the specified 5337 * device. 5338 * 5339 * @return ANEURALNETWORKS_NO_ERROR if successful. 5340 * 5341 * Available since API level 29. 5342 */ 5343 int ANeuralNetworks_getDevice(uint32_t devIndex, ANeuralNetworksDevice** device) 5344 __INTRODUCED_IN(29); 5345 5346 /** 5347 * Get the name of the specified device. 5348 * 5349 * @param device The representation of the specified device. 5350 * @param name The returned name of the specified device. The name will be in UTF-8 5351 * and will be null-terminated. It will be recognizable as a known device name 5352 * rather than a cryptic string. For devices with feature level 29 and above, the 5353 * format of the name is {VENDOR}-{DEVICE}. For devices with feature level 28 5354 * or lower, the format of the name is undefined. 5355 * The name will remain valid for the duration of the application. 5356 * 5357 * @return ANEURALNETWORKS_NO_ERROR if successful. 5358 * 5359 * Available since API level 29. 5360 */ 5361 int ANeuralNetworksDevice_getName(const ANeuralNetworksDevice* device, const char** name) 5362 __INTRODUCED_IN(29); 5363 5364 /** 5365 * Get the type of a given device. 5366 * 5367 * The device type can be used to help application developers to distribute Machine Learning 5368 * workloads and other workloads such as graphical rendering. 5369 * E.g., for an app which renders AR scenes based on real time object detection results, 5370 * the developer could choose an ACCELERATOR type device for ML workloads, and reserve GPU 5371 * for graphical rendering. 5372 * 5373 * @param device The representation of the specified device. 5374 * @param type The returned {@link DeviceTypeCode} of the specified device. 5375 * 5376 * @return ANEURALNETWORKS_NO_ERROR if successful. 5377 * 5378 * Available since API level 29. 5379 */ 5380 int ANeuralNetworksDevice_getType(const ANeuralNetworksDevice* device, int32_t* type) 5381 __INTRODUCED_IN(29); 5382 5383 /** 5384 * Get the version of the driver implementation of the specified device. 5385 * 5386 * It’s the responsibility of the driver implementor to insure that this version string 5387 * uniquely distinguishes this implementation from all previous implementations. 5388 * 5389 * This version string must not be confused with the feature level which is solely defined 5390 * by {@link ANeuralNetworksDevice_getFeatureLevel}. There is no implicit ordering of the versions. 5391 * For example, it is not possible to filter all drivers older than a certain version. 5392 * 5393 * Application developers may use this version string to avoid or prefer specific driver 5394 * implementations. For example, an application may want to do so because: 5395 * - A specific version of the driver does not provide the required performance, 5396 * perhaps because of a performance regression. 5397 * - A specific version of the driver has a bug or returns results that don’t match 5398 * the minimum precision requirement for the application. 5399 * 5400 * @param device The representation of the specified device. 5401 * @param version The returned version string of the driver for the specified device. The 5402 * string will be in UTF-8 and will be null-terminated. For devices with feature 5403 * level 28 or lower, "UNKNOWN" will be returned. The version string will remain 5404 * valid for the duration of the application. 5405 * 5406 * @return ANEURALNETWORKS_NO_ERROR if successful. 5407 * 5408 * Available since API level 29. 5409 */ 5410 int ANeuralNetworksDevice_getVersion(const ANeuralNetworksDevice* device, const char** version) 5411 __INTRODUCED_IN(29); 5412 5413 /** 5414 * Get the supported NNAPI version of the specified device. 5415 * 5416 * Each device has a supported feature level, which is the most advanced feature this driver 5417 * implements. For example, if the driver implements the features introduced in Android P, 5418 * but does not implement the features introduced after Android P, the value would be 28. 5419 * Developers could decide whether or not the specified device should be used for a Model that 5420 * has certain feature requirements. 5421 * 5422 * @param device The representation of the specified device. 5423 * @param featureLevel The API level of the most advanced feature this driver implements. 5424 * 5425 * @return ANEURALNETWORKS_NO_ERROR if successful. 5426 * 5427 * Available since API level 29. 5428 */ 5429 int ANeuralNetworksDevice_getFeatureLevel(const ANeuralNetworksDevice* device, 5430 int64_t* featureLevel) __INTRODUCED_IN(29); 5431 5432 /** 5433 * Get the supported operations for a specified set of devices. If multiple devices 5434 * are selected, the supported operation list is a union of supported operations of all 5435 * selected devices. 5436 * 5437 * @param model The model to be queried. 5438 * @param devices The set of devices. Must not contain duplicates. 5439 * @param numDevices The number of devices in the set. 5440 * @param supportedOps The boolean array to be filled. True means supported. The size of the 5441 * boolean array must be at least as large as the number of operations 5442 * in the model. The order of elements in the supportedOps array matches 5443 * the order in which the corresponding operations were added to the model. 5444 * 5445 * @return ANEURALNETWORKS_NO_ERROR if successful. 5446 * 5447 * Available since API level 29. 5448 */ 5449 int ANeuralNetworksModel_getSupportedOperationsForDevices( 5450 const ANeuralNetworksModel* model, const ANeuralNetworksDevice* const* devices, 5451 uint32_t numDevices, bool* supportedOps) __INTRODUCED_IN(29); 5452 5453 /** 5454 * Create a {@link ANeuralNetworksCompilation} to compile the given model for a specified set 5455 * of devices. If more than one device is specified, the compilation will 5456 * distribute the workload automatically across the devices. The model must be fully 5457 * supported by the specified set of devices. This means that 5458 * ANeuralNetworksModel_getSupportedOperationsForDevices() must have returned true for every 5459 * operation for that model/devices pair. 5460 * 5461 * The user must handle all compilation and execution failures from the 5462 * specified set of devices. This is in contrast to a use of {@link 5463 * ANeuralNetworksCompilation_create}, where the runtime will attempt to recover 5464 * from such failures. 5465 * 5466 * @param model The {@link ANeuralNetworksModel} to be compiled. 5467 * @param devices The set of devices. Must not contain duplicates. 5468 * @param numDevices The number of devices in the set. 5469 * @param compilation The newly created object or NULL if unsuccessful. 5470 * 5471 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA 5472 * if the model is invalid. 5473 * 5474 * Available since API level 29. 5475 */ 5476 int ANeuralNetworksCompilation_createForDevices(ANeuralNetworksModel* model, 5477 const ANeuralNetworksDevice* const* devices, 5478 uint32_t numDevices, 5479 ANeuralNetworksCompilation** compilation) 5480 __INTRODUCED_IN(29); 5481 5482 /** 5483 * Sets the compilation caching signature and the cache directory. 5484 * 5485 * Provides optional caching information to the runtime for faster repeated 5486 * compilation. 5487 * 5488 * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. 5489 * 5490 * @param compilation The compilation to be modified. 5491 * @param cacheDir The cache directory for the runtime to store and retrieve caching 5492 * data. It is recommended to use the code cache directory provided 5493 * by the Android runtime. If not using the code cache directory, the 5494 * user should choose a directory local to the application, and is 5495 * responsible to managing the cache entries. 5496 * @param token The token provided by the user to specify a model must be of length 5497 * ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN. The user should ensure that 5498 * the token is unique to a model within the application. The NNAPI 5499 * runtime cannot detect token collisions; a collision will result in a 5500 * failed execution or in a successful execution that produces incorrect 5501 * output values. 5502 * 5503 * @return ANEURALNETWORKS_NO_ERROR if successful. 5504 * 5505 * Available since API level 29. 5506 */ 5507 int ANeuralNetworksCompilation_setCaching(ANeuralNetworksCompilation* compilation, 5508 const char* cacheDir, const uint8_t* token) 5509 __INTRODUCED_IN(29); 5510 5511 /** 5512 * Schedule synchronous evaluation of the execution. 5513 * 5514 * <p>Schedules synchronous evaluation of the execution. Returns once the 5515 * execution has completed and the outputs are ready to be consumed. 5516 * </p> 5517 * 5518 * See {@link ANeuralNetworksExecution} for information on multithreaded usage. 5519 * 5520 * See {@link ANeuralNetworksExecution_startCompute} for asynchronous execution. 5521 * Synchronous execution incurs lower overhead than asynchronous execution. 5522 * 5523 * Available since API level 29. 5524 * 5525 * @param execution The execution to be scheduled and executed. 5526 * 5527 * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally. 5528 * ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot 5529 * be properly mapped. 5530 */ 5531 int ANeuralNetworksExecution_compute(ANeuralNetworksExecution* execution) __INTRODUCED_IN(29); 5532 5533 /** 5534 * Get the dimensional information of the specified output operand of the model of the 5535 * {@link ANeuralNetworksExecution}. 5536 * 5537 * On asynchronous execution initiated by {@link ANeuralNetworksExecution_startCompute}, 5538 * {@link ANeuralNetworksEvent_wait} must be called prior to this function to recuperate 5539 * the resources used by the execution. 5540 * 5541 * @param execution The execution to be queried. 5542 * @param index The index of the output argument we are querying. It is 5543 * an index into the lists passed to 5544 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not 5545 * the index associated with {@link ANeuralNetworksModel_addOperand}. 5546 * @param rank The rank of the output operand. 5547 * 5548 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE 5549 * if the target output is provided an insufficient buffer at execution time, 5550 * ANEURALNETWORKS_BAD_DATA if the index is invalid. 5551 * 5552 * Available since API level 29. 5553 */ 5554 int ANeuralNetworksExecution_getOutputOperandRank(ANeuralNetworksExecution* execution, 5555 int32_t index, uint32_t* rank) 5556 __INTRODUCED_IN(29); 5557 5558 /** 5559 * Get the dimensional information of the specified output operand of the model of the 5560 * {@link ANeuralNetworksExecution}. The target output operand cannot be a scalar. 5561 * 5562 * On asynchronous execution initiated by {@link ANeuralNetworksExecution_startCompute}, 5563 * {@link ANeuralNetworksEvent_wait} must be called prior to this function to recuperate 5564 * the resources used by the execution. 5565 * 5566 * @param execution The execution to be queried. 5567 * @param index The index of the output argument we are querying. It is an index into the lists 5568 * passed to {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not 5569 * the index associated with {@link ANeuralNetworksModel_addOperand}. 5570 * @param dimensions The dimension array to be filled. The size of the array must be exactly as 5571 * large as the rank of the output operand to be queried in the model. 5572 * 5573 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE 5574 * if the target output is provided an insufficient buffer at execution time, 5575 * ANEURALNETWORKS_BAD_DATA if the index is invalid or if the target is a scalar. 5576 * 5577 * Available since API level 29. 5578 */ 5579 int ANeuralNetworksExecution_getOutputOperandDimensions(ANeuralNetworksExecution* execution, 5580 int32_t index, uint32_t* dimensions) 5581 __INTRODUCED_IN(29); 5582 5583 /** 5584 * Create a {@link ANeuralNetworksBurst} to apply the given compilation. 5585 * This only creates the burst object. Computation is only performed once 5586 * {@link ANeuralNetworksExecution_burstCompute} is invoked with a valid 5587 * {@link ANeuralNetworksExecution} and {@link ANeuralNetworksBurst}. 5588 * 5589 * <p>The provided compilation must outlive the burst object.</p> 5590 * 5591 * Available since API level 29. 5592 * 5593 * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated. 5594 * @param burst The newly created object or NULL if unsuccessful. 5595 * 5596 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA 5597 * if the compilation is invalid. 5598 */ 5599 int ANeuralNetworksBurst_create(ANeuralNetworksCompilation* compilation, 5600 ANeuralNetworksBurst** burst) __INTRODUCED_IN(29); 5601 5602 /** 5603 * Destroys the burst object. 5604 * 5605 * Available since API level 29. 5606 * 5607 * @param burst The burst object to be destroyed. Passing NULL is acceptable and 5608 * results in no operation. 5609 */ 5610 void ANeuralNetworksBurst_free(ANeuralNetworksBurst* burst) __INTRODUCED_IN(29); 5611 5612 /** 5613 * Schedule synchronous evaluation of the execution on a burst object. 5614 * 5615 * <p>Schedules synchronous evaluation of the execution. Returns once the 5616 * execution has completed and the outputs are ready to be consumed.</p> 5617 * 5618 * <p>There must be at most one {@link ANeuralNetworksExecution} processing at 5619 * any given time for any given burst object. Any 5620 * {@link ANeuralNetworksExecution} launched before the previous has finished 5621 * will result in ANEURALNETWORKS_BAD_STATE.</p> 5622 * 5623 * Available since API level 29. 5624 * 5625 * @param burst The burst object to execute on. 5626 * @param execution The execution to be scheduled and executed. The execution 5627 * must be created from the same {@link 5628 * ANeuralNetworksCompilation} as the burst object. 5629 * 5630 * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally. 5631 */ 5632 int ANeuralNetworksExecution_burstCompute(ANeuralNetworksExecution* execution, 5633 ANeuralNetworksBurst* burst) __INTRODUCED_IN(29); 5634 5635 /** 5636 * Creates a shared memory object from an AHardwareBuffer handle. 5637 * 5638 * If the shared memory is backed by an AHardwareBuffer of AHARDWAREBUFFER_FORMAT_BLOB 5639 * format, it can be used the same way as shared memory created from a file handle. See 5640 * {@link ANeuralNetworksMemory} for a description on how to use this shared memory. 5641 * 5642 * If the shared memory is backed by an AHardwareBuffer of a format other than 5643 * AHARDWAREBUFFER_FORMAT_BLOB, it can only be used for Model inputs and outputs. 5644 * When calling {@link ANeuralNetworksExecution_setInputFromMemory} or 5645 * {@link ANeuralNetworksExecution_setOutputFromMemory} with the shared memory, both 5646 * offset and length must be set to zero and the entire memory region will be 5647 * associated with the specified input or output operand. There is no guarantee 5648 * that an arbitrary AHardwareBuffer_Format and AHardwareBuffer_UsageFlags combination 5649 * can be used by arbitrary devices. The execution will fail if selected set of devices 5650 * cannot consume the buffer. 5651 * 5652 * Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with shared memory 5653 * backed by an AHardwareBuffer of a format other than AHARDWAREBUFFER_FORMAT_BLOB is 5654 * disallowed. 5655 * 5656 * TODO(miaowang): add documentation about intended usage with introspection API. 5657 * 5658 * Available since API level 29. 5659 * 5660 * @param ahwb The AHardwareBuffer handle. 5661 * @param memory The memory object to be created. 5662 * Set to NULL if unsuccessful. 5663 * 5664 * @return ANEURALNETWORKS_NO_ERROR if the request completed normally. 5665 * 5666 * @see AHardwareBuffer 5667 */ 5668 int ANeuralNetworksMemory_createFromAHardwareBuffer(const AHardwareBuffer* ahwb, 5669 ANeuralNetworksMemory** memory) 5670 __INTRODUCED_IN(29); 5671 5672 /** 5673 5674 * Specifies whether duration of the {@link ANeuralNetworksExecution} is to be 5675 * measured. Evaluation of the execution must not have been scheduled. 5676 * 5677 * By default, duration is not measured. 5678 * 5679 * The {@link ANeuralNetworksExecution} must have been created with 5680 * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1. 5681 * 5682 * See {@link ANeuralNetworksExecution} for information on multithreaded usage. 5683 * 5684 * Available since API level 29. 5685 * 5686 * @param execution The execution to be modified. 5687 * @param measure 'true' if duration is to be measured, 'false' if not. 5688 * 5689 * @return ANEURALNETWORKS_NO_ERROR if successful. 5690 */ 5691 int ANeuralNetworksExecution_setMeasureTiming(ANeuralNetworksExecution* execution, bool measure) 5692 __INTRODUCED_IN(29); 5693 5694 /** 5695 * Different duration measurements. 5696 * 5697 * Durations are measured in nanoseconds. 5698 * 5699 * Available since API level 29. 5700 */ 5701 typedef enum { 5702 // Execution time on hardware (not driver, which runs on host processor). 5703 ANEURALNETWORKS_DURATION_ON_HARDWARE = 0, 5704 // Execution time in driver (including time on hardware). Excludes overhead 5705 // such as that of the runtime itself and the IPC needed for the runtime to 5706 // communicate with the driver. 5707 ANEURALNETWORKS_DURATION_IN_DRIVER = 1, 5708 } DurationCode; 5709 5710 /** 5711 * Get the time spent in the specified {@link ANeuralNetworksExecution}, in nanoseconds. 5712 * The execution must have completed. 5713 * 5714 * Available since API level 29. 5715 * 5716 * @param execution The execution to be queried. 5717 * @param durationCode The measurement to be queried, specified by {@link DurationCode}. 5718 * @param duration The returned duration. If no measurement was requested by 5719 * {@link ANeuralNetworksExecution_setMeasureTiming}, or for some other 5720 * reason the duration is not available, UINT64_MAX will be returned. 5721 * A particular device need not support any given measurement. 5722 * 5723 * @return ANEURALNETWORKS_NO_ERROR if successful. 5724 */ 5725 int ANeuralNetworksExecution_getDuration(const ANeuralNetworksExecution* execution, 5726 int32_t durationCode, uint64_t* duration) 5727 __INTRODUCED_IN(29); 5728 5729 #endif // __ANDROID_API__ >= __ANDROID_API_Q__ 5730 5731 #if __ANDROID_API__ >= 27 5732 5733 /** 5734 * Creates a shared memory object from a file descriptor. 5735 * 5736 * The shared memory is backed by a file descriptor via mmap. 5737 * See {@link ANeuralNetworksMemory} for a description on how to use 5738 * this shared memory. 5739 * 5740 * Available since API level 27. 5741 * 5742 * @param size The requested size in bytes. 5743 * Must not be larger than the file size. 5744 * @param prot The desired memory protection for the mapping. 5745 * It is either PROT_NONE or the bitwise OR of one or 5746 * more of the following flags: PROT_READ, PROT_WRITE. 5747 * @param fd The requested file descriptor. 5748 * The file descriptor has to be mmap-able. The file 5749 * descriptor will be duplicated. 5750 * @param offset The offset to the beginning of the file of the area to map. 5751 * The offset has to be aligned to a page size. 5752 * @param memory The memory object to be created. 5753 * Set to NULL if unsuccessful. 5754 * 5755 * @return ANEURALNETWORKS_NO_ERROR if the request completed normally. 5756 */ 5757 int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset, 5758 ANeuralNetworksMemory** memory) __INTRODUCED_IN(27); 5759 5760 /** 5761 * Delete a memory object. 5762 * 5763 * Destroys the object used by the run time to keep track of the memory. 5764 * This will free the underlying actual memory if no other code has open 5765 * handles to this memory. 5766 * 5767 * Available since API level 27. 5768 * 5769 * @param memory The memory object to be freed. 5770 */ 5771 void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) __INTRODUCED_IN(27); 5772 5773 /** 5774 * Create an empty {@link ANeuralNetworksModel}. 5775 * 5776 * <p>This only creates the object. Computation is performed once 5777 * {@link ANeuralNetworksExecution_compute} or 5778 * {@link ANeuralNetworksExecution_startCompute} is invoked. 5779 * 5780 * The model should be constructed with calls to 5781 * {@link ANeuralNetworksModel_addOperation} and 5782 * {@link ANeuralNetworksModel_addOperand} 5783 * 5784 * <p>{@link ANeuralNetworksModel_finish} should be called once the model 5785 * has been fully constructed.</p> 5786 * 5787 * <p>{@link ANeuralNetworksModel_free} should be called once the model 5788 * is no longer needed.</p> 5789 * 5790 * Available since API level 27. 5791 * 5792 * @param model The {@link ANeuralNetworksModel} to be created. 5793 * Set to NULL if unsuccessful. 5794 * 5795 * @return ANEURALNETWORKS_NO_ERROR if successful. 5796 */ 5797 int ANeuralNetworksModel_create(ANeuralNetworksModel** model) __INTRODUCED_IN(27); 5798 5799 /** 5800 * Destroy a model. 5801 * 5802 * The model need not have been finished by a call to 5803 * {@link ANeuralNetworksModel_finish}. 5804 * 5805 * See {@link ANeuralNetworksModel} for information on multithreaded usage. 5806 * 5807 * Available since API level 27. 5808 * 5809 * @param model The model to be destroyed. Passing NULL is acceptable and 5810 * results in no operation. 5811 */ 5812 void ANeuralNetworksModel_free(ANeuralNetworksModel* model) __INTRODUCED_IN(27); 5813 5814 /** 5815 * Indicate that we have finished modifying a model. Required before 5816 * calling {@link ANeuralNetworksCompilation_create} and 5817 * {@link ANeuralNetworksCompilation_createForDevices}. 5818 * 5819 * An application is responsible to make sure that no other thread uses 5820 * the model at the same time. 5821 * 5822 * This function must only be called once for a given model. 5823 * 5824 * See {@link ANeuralNetworksModel} for information on multithreaded usage. 5825 * 5826 * Available since API level 27. 5827 * 5828 * @param model The model to be finished. 5829 * 5830 * @return ANEURALNETWORKS_NO_ERROR if successful. 5831 */ 5832 int ANeuralNetworksModel_finish(ANeuralNetworksModel* model) __INTRODUCED_IN(27); 5833 5834 /** 5835 * Add an operand to a model. 5836 * 5837 * The order in which the operands are added is important. The first one added 5838 * to a model will have the index value 0, the second 1, etc. These indexes are 5839 * used as operand identifiers in 5840 * {@link ANeuralNetworksModel_addOperation}, 5841 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}, 5842 * {@link ANeuralNetworksModel_setOperandValue}, 5843 * {@link ANeuralNetworksModel_setOperandValueFromMemory}, 5844 * {@link ANeuralNetworksExecution_setInput}, 5845 * {@link ANeuralNetworksExecution_setInputFromMemory}, 5846 * {@link ANeuralNetworksExecution_setOutput}, 5847 * {@link ANeuralNetworksExecution_setOutputFromMemory} and 5848 * {@link ANeuralNetworksExecution_setOperandValue}. 5849 * 5850 * <p>Every operand must be referenced in exactly one of the following 5851 * ways:<ul> 5852 * <li>It is identified as a model input with 5853 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}.</li> 5854 * <li>It is identified as a constant with 5855 * {@link ANeuralNetworksModel_setOperandValue} or 5856 * {@link ANeuralNetworksModel_setOperandValueFromMemory}.</li> 5857 * <li>It is identified as an output of exactly one operation with 5858 * {@link ANeuralNetworksModel_addOperation}.</li></p> 5859 * <p>An operand that is identified as a model input or as a constant 5860 * must not also be identified as a model output with 5861 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}.</p> 5862 * 5863 * To build a model that can accommodate inputs of various sizes, as 5864 * you may want to do for a CNN, leave unspecified the dimensions that 5865 * will vary at run time. If you do so, fully specify dimensions 5866 * when calling {@link ANeuralNetworksExecution_setInput} or 5867 * {@link ANeuralNetworksExecution_setInputFromMemory}. 5868 * 5869 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been 5870 * called will return an error. 5871 * 5872 * See {@link ANeuralNetworksModel} for information on multithreaded usage. 5873 * 5874 * Available since API level 27. 5875 * 5876 * @param model The model to be modified. 5877 * @param type The {@link ANeuralNetworksOperandType} that describes the shape 5878 * of the operand. Neither the {@link ANeuralNetworksOperandType} 5879 * nor the dimensions it points to need to outlive the call to 5880 * {@link ANeuralNetworksModel_addOperand}. 5881 * 5882 * @return ANEURALNETWORKS_NO_ERROR if successful. 5883 */ 5884 int ANeuralNetworksModel_addOperand(ANeuralNetworksModel* model, 5885 const ANeuralNetworksOperandType* type) __INTRODUCED_IN(27); 5886 5887 /** 5888 * Sets an operand to a constant value. 5889 * 5890 * Values of length smaller or equal to 5891 * {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES} 5892 * are immediately copied into the model. 5893 * 5894 * For values of length greater than {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES}, 5895 * a pointer to the buffer is stored within the model. The application is responsible 5896 * for not changing the content of this region until all executions using this model 5897 * have completed. As the data may be copied during processing, modifying the data 5898 * after this call yields undefined results. 5899 * 5900 * For large tensors, using {@link ANeuralNetworksModel_setOperandValueFromMemory} 5901 * is likely to be more efficient. 5902 * 5903 * To indicate that an optional operand should be considered missing, 5904 * pass nullptr for buffer and 0 for length. 5905 * 5906 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been 5907 * called will return an error. 5908 * 5909 * See {@link ANeuralNetworksModel} for information on multithreaded usage. 5910 * 5911 * Available since API level 27. 5912 * 5913 * @param model The model to be modified. 5914 * @param index The index of the model operand we're setting. 5915 * @param buffer A pointer to the data to use. 5916 * @param length The size in bytes of the data value. 5917 * 5918 * @return ANEURALNETWORKS_NO_ERROR if successful. 5919 */ 5920 int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel* model, int32_t index, 5921 const void* buffer, size_t length) __INTRODUCED_IN(27); 5922 5923 #if __ANDROID_API__ >= __ANDROID_API_Q__ 5924 5925 /** 5926 * Sets an operand's per channel quantization parameters. 5927 * 5928 * Sets parameters required by a tensor of type 5929 * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}. 5930 * This function must be called for every tensor of type 5931 * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} before 5932 * calling {@link ANeuralNetworksModel_finish}. 5933 * 5934 * Available since API level 29. 5935 * 5936 * @param model The model to be modified. 5937 * @param index The index of the model operand we're setting. 5938 * @param channelQuant The per channel quantization parameters for the operand. 5939 * No memory in this struct needs to outlive the call to 5940 * this function. 5941 * 5942 * @return ANEURALNETWORKS_NO_ERROR if successful. 5943 */ 5944 int ANeuralNetworksModel_setOperandSymmPerChannelQuantParams( 5945 ANeuralNetworksModel* model, int32_t index, 5946 const ANeuralNetworksSymmPerChannelQuantParams* channelQuant) __INTRODUCED_IN(29); 5947 5948 #endif // __ANDROID_API__ >= __ANDROID_API_Q__ 5949 5950 /** 5951 * Sets an operand to a value stored in a memory object. 5952 * 5953 * The content of the memory is not copied. A reference to that memory is stored 5954 * inside the model. The application is responsible for not changing the content 5955 * of the memory region until all executions using this model have completed. 5956 * As the data may be copied during processing, modifying the data after this call 5957 * yields undefined results. 5958 * 5959 * To indicate that an optional operand should be considered missing, 5960 * use {@link ANeuralNetworksModel_setOperandValue} instead, passing nullptr for buffer. 5961 * 5962 * Is disallowed to set an operand value with shared memory backed by an AHardwareBuffer 5963 * of a format other than AHARDWAREBUFFER_FORMAT_BLOB. 5964 * 5965 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been 5966 * called will return an error. 5967 * 5968 * See {@link ANeuralNetworksModel} for information on multithreaded usage. 5969 * See {@link ANeuralNetworksMemory_createFromAHardwarBuffer} for information on 5970 * AHardwareBuffer usage. 5971 * 5972 * Available since API level 27. 5973 * 5974 * @param model The model to be modified. 5975 * @param index The index of the model operand we're setting. 5976 * @param buffer A pointer to the data to use. 5977 * @param memory The memory containing the data. 5978 * @param offset This specifies the location of the data within the memory. 5979 * The offset is in bytes from the start of memory. 5980 * @param length The size in bytes of the data value. 5981 * 5982 * @return ANEURALNETWORKS_NO_ERROR if successful. 5983 */ 5984 int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel* model, int32_t index, 5985 const ANeuralNetworksMemory* memory, 5986 size_t offset, size_t length) 5987 __INTRODUCED_IN(27); 5988 5989 /** 5990 * Add an operation to a model. 5991 * 5992 * @param model The model to be modified. 5993 * @param type The {@link ANeuralNetworksOperationType} of the operation. 5994 * @param inputCount The number of entries in the inputs array. 5995 * @param inputs An array of indexes identifying each operand. 5996 * @param outputCount The number of entries in the outputs array. 5997 * @param outputs An array of indexes identifying each operand. 5998 * 5999 * The operands specified by inputs and outputs must have been 6000 * previously added by calls to {@link ANeuralNetworksModel_addOperand}. 6001 * 6002 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been 6003 * called will return an error. 6004 * 6005 * See {@link ANeuralNetworksModel} for information on multithreaded usage. 6006 * 6007 * Available since API level 27. 6008 * 6009 * @return ANEURALNETWORKS_NO_ERROR if successful. 6010 */ 6011 int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model, 6012 ANeuralNetworksOperationType type, uint32_t inputCount, 6013 const uint32_t* inputs, uint32_t outputCount, 6014 const uint32_t* outputs) __INTRODUCED_IN(27); 6015 6016 /** 6017 * Specifies which operands will be the model's inputs and 6018 * outputs. Every model must have at least one input and one output. 6019 * 6020 * An operand cannot be used for both input and output. Doing so will 6021 * return an error. 6022 * 6023 * @param model The model to be modified. 6024 * @param inputCount The number of entries in the inputs array. 6025 * @param inputs An array of indexes identifying the input operands. 6026 * @param outputCount The number of entries in the outputs array. 6027 * @param outputs An array of indexes identifying the output operands. 6028 * 6029 * The operands specified by inputs and outputs must have been 6030 * previously added by calls to {@link ANeuralNetworksModel_addOperand}. 6031 * 6032 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been 6033 * called will return an error. 6034 * 6035 * See {@link ANeuralNetworksModel} for information on multithreaded usage. 6036 * 6037 * Available since API level 27. 6038 * 6039 */ 6040 int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel* model, uint32_t inputCount, 6041 const uint32_t* inputs, uint32_t outputCount, 6042 const uint32_t* outputs) __INTRODUCED_IN(27); 6043 6044 #if __ANDROID_API__ >= 28 6045 6046 /** 6047 * Specifies whether {@link ANEURALNETWORKS_TENSOR_FLOAT32} is allowed to be 6048 * calculated with range and/or precision as low as that of the IEEE 754 16-bit 6049 * floating-point format. By default, {@link ANEURALNETWORKS_TENSOR_FLOAT32} 6050 * must be calculated using at least the range and precision of the IEEE 754 6051 * 32-bit floating-point format. 6052 * 6053 * @param model The model to be modified. 6054 * @param allow 'true' indicates {@link ANEURALNETWORKS_TENSOR_FLOAT32} may be 6055 * calculated with range and/or precision as low as that of the 6056 * IEEE 754 16-bit floating point format. 'false' indicates 6057 * {@link ANEURALNETWORKS_TENSOR_FLOAT32} must be calculated using 6058 * at least the range and precision of the IEEE 754 32-bit floating 6059 * point format. 6060 * 6061 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been 6062 * called will return an error. 6063 * 6064 * Available since API level 28. 6065 * 6066 * See {@link ANeuralNetworksModel} for information on multithreaded usage. 6067 */ 6068 int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel* model, bool allow) 6069 __INTRODUCED_IN(28); 6070 6071 #endif // __ANDROID_API__ >= 28 6072 6073 /** 6074 * Create a {@link ANeuralNetworksCompilation} to compile the given model. 6075 * 6076 * <p>This only creates the object. Compilation is only performed once 6077 * {@link ANeuralNetworksCompilation_finish} is invoked.</p> 6078 * 6079 * <p>{@link ANeuralNetworksCompilation_finish} should be called once 6080 * all desired properties have been set on the compilation.</p> 6081 * 6082 * <p>{@link ANeuralNetworksModel_free} should be called once the compilation 6083 * is no longer needed.</p> 6084 * 6085 * <p>The provided model must outlive the compilation.</p> 6086 * 6087 * The model must already have been finished by a call to 6088 * {@link ANeuralNetworksModel_finish}. 6089 * 6090 * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. 6091 * 6092 * Available since API level 27. 6093 * 6094 * @param model The {@link ANeuralNetworksModel} to be compiled. 6095 * @param compilation The newly created object or NULL if unsuccessful. 6096 * 6097 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA 6098 * if the model is invalid. 6099 */ 6100 int ANeuralNetworksCompilation_create(ANeuralNetworksModel* model, 6101 ANeuralNetworksCompilation** compilation) __INTRODUCED_IN(27); 6102 6103 /** 6104 * Destroy a compilation. 6105 * 6106 * The compilation need not have been finished by a call to 6107 * {@link ANeuralNetworksModel_finish}. 6108 * 6109 * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. 6110 * 6111 * Available since API level 27. 6112 * 6113 * @param compilation The compilation to be destroyed. Passing NULL is acceptable and 6114 * results in no operation. 6115 */ 6116 void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation* compilation) __INTRODUCED_IN(27); 6117 6118 /** 6119 * Sets the execution preference. 6120 * 6121 * <p>Provides guidance to the runtime when trade-offs are possible.</p> 6122 * 6123 * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. 6124 * 6125 * Available since API level 27. 6126 * 6127 * @param compilation The compilation to be modified. 6128 * @param preference Either {@link PREFER_LOW_POWER}, 6129 * {@link PREFER_SINGLE_FAST_ANSWER}, or 6130 * {@link PREFER_SUSTAINED_SPEED}. 6131 * 6132 * @return ANEURALNETWORKS_NO_ERROR if successful. 6133 */ 6134 int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation* compilation, 6135 int32_t preference) __INTRODUCED_IN(27); 6136 6137 /** 6138 * Indicate that we have finished modifying a compilation. Required before 6139 * calling {@link ANeuralNetworksExecution_create}. 6140 * 6141 * An application is responsible to make sure that no other thread uses 6142 * the compilation at the same time. 6143 * 6144 * This function must only be called once for a given compilation. 6145 * 6146 * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. 6147 * 6148 * Available since API level 27. 6149 * 6150 * @param compilation The compilation to be finished. 6151 * 6152 * @return ANEURALNETWORKS_NO_ERROR if successful. 6153 */ 6154 int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation* compilation) __INTRODUCED_IN(27); 6155 6156 /** 6157 * Create a {@link ANeuralNetworksExecution} to apply the given compilation. 6158 * This only creates the object. Computation is only performed once 6159 * {@link ANeuralNetworksExecution_compute} or 6160 * {@link ANeuralNetworksExecution_startCompute} is invoked. 6161 * 6162 * <p>The provided compilation must outlive the execution.</p> 6163 * 6164 * See {@link ANeuralNetworksExecution} for information on multithreaded usage. 6165 * 6166 * Available since API level 27. 6167 * 6168 * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated. 6169 * @param execution The newly created object or NULL if unsuccessful. 6170 * 6171 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA 6172 * if the compilation is invalid. 6173 */ 6174 int ANeuralNetworksExecution_create(ANeuralNetworksCompilation* compilation, 6175 ANeuralNetworksExecution** execution) __INTRODUCED_IN(27); 6176 6177 /** 6178 * Destroy an execution. 6179 * 6180 * <p>If called on an execution for which 6181 * {@link ANeuralNetworksExecution_startCompute} has been called, the 6182 * function will return immediately but will mark the execution to be deleted 6183 * once the computation completes. The related {@link ANeuralNetworksEvent} 6184 * will be signaled and the {@link ANeuralNetworksEvent_wait} will return 6185 * ANEURALNETWORKS_ERROR_DELETED. 6186 * 6187 * See {@link ANeuralNetworksExecution} for information on multithreaded usage. 6188 * 6189 * Available since API level 27. 6190 * 6191 * @param execution The execution to be destroyed. Passing NULL is acceptable and 6192 * results in no operation. 6193 */ 6194 void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution) __INTRODUCED_IN(27); 6195 6196 /** 6197 * Associate a user buffer with an input of the model of the 6198 * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have 6199 * been scheduled. 6200 * 6201 * <p>The provided buffer must outlive the execution.</p> 6202 * 6203 * If the input is optional, you can indicate that it is omitted by 6204 * passing nullptr for buffer and 0 for length. 6205 * 6206 * See {@link ANeuralNetworksExecution} for information on multithreaded usage. 6207 * 6208 * Available since API level 27. 6209 * 6210 * @param execution The execution to be modified. 6211 * @param index The index of the input argument we are setting. It is 6212 * an index into the lists passed to 6213 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not 6214 * the index associated with 6215 * {@link ANeuralNetworksModel_addOperand}. 6216 * @param type The {@link ANeuralNetworksOperandType} of the 6217 * operand. Unless the input is omitted, this should be 6218 * used to specify the dimensions that were left 6219 * unspecified when the operand was added to the 6220 * model. All other properties of the type must be the 6221 * same as specified in the model. If the type is the same 6222 * as specified when the model was built, NULL can be 6223 * passed. Neither the {@link ANeuralNetworksOperandType} 6224 * nor the dimensions it points to need to outlive the call 6225 * to {@link ANeuralNetworksExecution_setInput}. 6226 * @param buffer The buffer containing the data. 6227 * @param length The length in bytes of the buffer. 6228 * 6229 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the 6230 * name is not recognized or the buffer is too small for the input. 6231 */ 6232 int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution* execution, int32_t index, 6233 const ANeuralNetworksOperandType* type, const void* buffer, 6234 size_t length) __INTRODUCED_IN(27); 6235 6236 /** 6237 * Associate part of a memory object with an input of the model of the 6238 * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have 6239 * been scheduled. 6240 * 6241 * <p>The provided memory must outlive the execution.</p> 6242 * 6243 * If the input is optional, you can indicate that it is omitted by 6244 * using {@link ANeuralNetworksExecution_setInput} instead, passing nullptr for 6245 * buffer and 0 for length. 6246 * 6247 * See {@link ANeuralNetworksExecution} for information on multithreaded usage. 6248 * See {@link ANeuralNetworksMemory_createFromAHardwarBuffer} for information on 6249 * AHardwareBuffer usage. 6250 * 6251 * Available since API level 27. 6252 * 6253 * @param execution The execution to be modified. 6254 * @param index The index of the input argument we are setting. It is 6255 * an index into the lists passed to 6256 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not 6257 * the index associated with {@link ANeuralNetworksModel_addOperand}. 6258 * @param type The {@link ANeuralNetworksOperandType} of the 6259 * operand. This should be used to specify the dimensions 6260 * that were left unspecified when the operand was added 6261 * to the model. All other properties of the type must be 6262 * the same as specified in the model. If the type is the 6263 * same as specified when the model was built, NULL can be 6264 * passed. Neither the {@link ANeuralNetworksOperandType} 6265 * nor the dimensions it points to need to outlive the call 6266 * to {@link ANeuralNetworksExecution_setInputFromMemory}. 6267 * @param memory The memory containing the data. 6268 * @param offset This specifies the location of the data within the memory. 6269 * The offset is in bytes from the start of memory. 6270 * @param length The size in bytes of the data value. 6271 * 6272 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the 6273 * name is not recognized or the buffer is too small for the input. 6274 */ 6275 int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution* execution, int32_t index, 6276 const ANeuralNetworksOperandType* type, 6277 const ANeuralNetworksMemory* memory, size_t offset, 6278 size_t length) __INTRODUCED_IN(27); 6279 6280 /** 6281 * Associate a user buffer with an output of the model of the 6282 * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have 6283 * been scheduled. 6284 * 6285 * If the output is optional, you can indicate that it is omitted by 6286 * passing nullptr for buffer and 0 for length. 6287 * 6288 * <p>The provided buffer must outlive the execution.</p> 6289 * 6290 * See {@link ANeuralNetworksExecution} for information on multithreaded usage. 6291 * 6292 * Available since API level 27. 6293 * 6294 * @param execution The execution to be modified. 6295 * @param index The index of the output argument we are setting. It is 6296 * an index into the lists passed to 6297 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not 6298 * the index associated with {@link ANeuralNetworksModel_addOperand}. 6299 * @param type The {@link ANeuralNetworksOperandType} of the 6300 * operand. Unless the output is omitted, this should be 6301 * used to specify the dimensions that were left 6302 * unspecified when the operand was added to the 6303 * model. All other properties of the type must be the 6304 * same as specified in the model. If the type is the same 6305 * as specified when the model was built, NULL can be 6306 * passed. Neither the {@link ANeuralNetworksOperandType} 6307 * nor the dimensions it points to need to outlive the call 6308 * to {@link ANeuralNetworksExecution_setOutput}. 6309 * Since API level 29, the output operand can have unspecified 6310 * dimensions or rank to be deduced dynamically during the execution. 6311 * However, the user must provide a large enough buffer. The user 6312 * can retrieve the output dimensional information after the execution 6313 * by {@link ANeuralNetworksExecution_getOutputOperandRank} and 6314 * {@link ANeuralNetworksExecution_getOutputOperandDimensions}. 6315 * @param buffer The buffer where the data is to be written. 6316 * @param length The length in bytes of the buffer. 6317 * 6318 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the 6319 * name is not recognized or the buffer is too small for the output. 6320 */ 6321 int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution* execution, int32_t index, 6322 const ANeuralNetworksOperandType* type, void* buffer, 6323 size_t length) __INTRODUCED_IN(27); 6324 6325 /** 6326 * Associate part of a memory object with an output of the model of the 6327 * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have 6328 * been scheduled. 6329 * 6330 * If the output is optional, you can indicate that it is omitted by 6331 * using {@link ANeuralNetworksExecution_setOutput} instead, passing nullptr for 6332 * buffer and 0 for length. 6333 * 6334 * <p>The provided memory must outlive the execution.</p> 6335 * 6336 * See {@link ANeuralNetworksExecution} for information on multithreaded usage. 6337 * See {@link ANeuralNetworksMemory_createFromAHardwarBuffer} for information on 6338 * AHardwareBuffer usage. 6339 * 6340 * Available since API level 27. 6341 * 6342 * @param execution The execution to be modified. 6343 * @param index The index of the output argument we are setting. It is 6344 * an index into the lists passed to 6345 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not 6346 * the index associated with {@link ANeuralNetworksModel_addOperand}. 6347 * @param type The {@link ANeuralNetworksOperandType} of the operand. This should be 6348 * used to specify the dimensions that were left 6349 * unspecified when the operand was added to the 6350 * model. All other properties of the type must be the 6351 * same as specified in the model. If the type is the same 6352 * as specified when the model was built, NULL can be 6353 * passed. Neither the {@link ANeuralNetworksOperandType} 6354 * nor the dimensions it points to need to outlive the call 6355 * to {@link ANeuralNetworksExecution_setOutputFromMemory}. 6356 * Since API level 29, the output operand can have unspecified 6357 * dimensions or rank to be deduced dynamically during the execution. 6358 * However, the user must provide a large enough memory. The user 6359 * can retrieve the output dimensional information after the execution 6360 * by {@link ANeuralNetworksExecution_getOutputOperandRank} and 6361 * {@link ANeuralNetworksExecution_getOutputOperandDimensions}. 6362 * @param memory The memory where the data is to be stored. 6363 * @param offset This specifies the location of the data within the memory. 6364 * The offset is in bytes from the start of memory. 6365 * @param length The length in bytes of the data value. 6366 * 6367 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if the 6368 * name is not recognized or the buffer is too small for the output. 6369 */ 6370 int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution* execution, int32_t index, 6371 const ANeuralNetworksOperandType* type, 6372 const ANeuralNetworksMemory* memory, size_t offset, 6373 size_t length) __INTRODUCED_IN(27); 6374 6375 /** 6376 * Schedule asynchronous evaluation of the execution. 6377 * 6378 * <p>Schedules asynchronous evaluation of the execution. Once the model has 6379 * been applied and the outputs are ready to be consumed, the returned event 6380 * will be signaled. Use {@link ANeuralNetworksEvent_wait} to wait for that 6381 * event. 6382 * </p> 6383 * 6384 * ANeuralNetworksEvent_wait must be called to recuperate the resources used 6385 * by the execution. 6386 * 6387 * See {@link ANeuralNetworksExecution} for information on multithreaded usage. 6388 * 6389 * See {@link ANeuralNetworksExecution_compute} for synchronous execution. 6390 * Synchronous execution incurs lower overhead than asynchronous execution. 6391 * 6392 * Available since API level 27. 6393 * 6394 * @param execution The execution to be scheduled and executed. 6395 * @param event The event that will be signaled on completion. event is set to 6396 * NULL if there's an error. 6397 * 6398 * @return ANEURALNETWORKS_NO_ERROR if successful. 6399 */ 6400 int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution* execution, 6401 ANeuralNetworksEvent** event) __INTRODUCED_IN(27); 6402 6403 /** 6404 * Waits until the execution completes. 6405 * 6406 * More than one thread can wait on an event. When the execution completes, 6407 * all threads will be released. 6408 * 6409 * See {@link ANeuralNetworksExecution} for information on multithreaded usage. 6410 * 6411 * Available since API level 27. 6412 * 6413 * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally. 6414 * ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot 6415 * be properly mapped. 6416 */ 6417 int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event) __INTRODUCED_IN(27); 6418 6419 /** 6420 * Destroys the event. 6421 * 6422 * See {@link ANeuralNetworksExecution} for information on multithreaded usage. 6423 * 6424 * Available since API level 27. 6425 */ 6426 void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) __INTRODUCED_IN(27); 6427 6428 #endif // __ANDROID_API__ >= 27 6429 6430 __END_DECLS 6431 6432 #endif // ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_H 6433 6434 /** @} */ 6435