• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2022 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 /**
 * @addtogroup NeuralNetworkRuntime
18  * @{
19  *
20  * @brief Provides APIs for accelerating the Neural Network Runtime model inference.
21  *
22  * @Syscap SystemCapability.Ai.NeuralNetworkRuntime
23  * @since 9
24  * @version 1.0
25  */
26 
27 /**
28  * @file neural_network_runtime_type.h
29  *
30  * @brief Defines the structure and enumeration for Neural Network Runtime.
31  *
32  * @since 9
33  * @version 1.0
34  */
35 
36 #ifndef NEURAL_NETWORK_RUNTIME_TYPE_H
37 #define NEURAL_NETWORK_RUNTIME_TYPE_H
38 
39 #include <cstddef>
40 #include <cstdint>
41 
42 #ifdef __cplusplus
43 extern "C" {
44 #endif
45 
/**
 * @brief Defines the handle of models for Neural Network Runtime.
 *
 * The struct is only forward-declared here, so the handle is opaque to
 * callers of this header.
 *
 * @since 9
 * @version 1.0
 */
typedef struct OH_NNModel OH_NNModel;
53 
/**
 * @brief Defines the compiler handle for Neural Network Runtime.
 *
 * The struct is only forward-declared here, so the handle is opaque to
 * callers of this header.
 *
 * @since 9
 * @version 1.0
 */
typedef struct OH_NNCompilation OH_NNCompilation;
61 
/**
 * @brief Defines the executor handle for Neural Network Runtime.
 *
 * The struct is only forward-declared here, so the handle is opaque to
 * callers of this header.
 *
 * @since 9
 * @version 1.0
 */
typedef struct OH_NNExecutor OH_NNExecutor;
69 
/**
 * @brief Defines the hardware performance mode.
 *
 * @since 9
 * @version 1.0
 */
typedef enum {
    /** No performance mode preference */
    OH_NN_PERFORMANCE_NONE = 0,
    /** Low power consumption mode */
    OH_NN_PERFORMANCE_LOW = 1,
    /** Medium performance mode */
    OH_NN_PERFORMANCE_MEDIUM = 2,
    /** High performance mode */
    OH_NN_PERFORMANCE_HIGH = 3,
    /** Ultimate performance mode */
    OH_NN_PERFORMANCE_EXTREME = 4
} OH_NN_PerformanceMode;
88 
/**
 * @brief Defines the model inference task priority.
 *
 * @since 9
 * @version 1.0
 */
typedef enum {
    /** No priority preference */
    OH_NN_PRIORITY_NONE = 0,
    /** Low priority */
    OH_NN_PRIORITY_LOW = 1,
    /** Medium priority */
    OH_NN_PRIORITY_MEDIUM = 2,
    /** High priority */
    OH_NN_PRIORITY_HIGH = 3
} OH_NN_Priority;
105 
/**
 * @brief Defines error codes for Neural Network Runtime.
 *
 * @since 9
 * @version 1.0
 */
typedef enum {
    /** The operation is successful. */
    OH_NN_SUCCESS = 0,
    /** The operation failed. */
    OH_NN_FAILED = 1,
    /** Invalid parameter. */
    OH_NN_INVALID_PARAMETER = 2,
    /** Memory-related error, for example, insufficient memory, a failed memory data copy, or a failed memory allocation. */
    OH_NN_MEMORY_ERROR = 3,
    /** Invalid operation. */
    OH_NN_OPERATION_FORBIDDEN = 4,
    /** Null pointer exception. */
    OH_NN_NULL_PTR = 5,
    /** Invalid file. */
    OH_NN_INVALID_FILE = 6,
    /** A hardware error occurs, for example, HDL service crash.
     *  NOTE(review): the enumerator name misspells "unavailable"; it is kept
     *  as-is because renaming would break the published API. */
    OH_NN_UNAVALIDABLE_DEVICE = 7,
    /** Invalid path. */
    OH_NN_INVALID_PATH = 8
} OH_NN_ReturnCode;
132 
/**
 * @brief Defines activation function types in the fusion operator for Neural Network Runtime.
 *
 * NOTE(review): the fixed underlying type (": int8_t") is C++ / C23 syntax;
 * a pre-C23 C compiler will reject it even inside the extern "C" block.
 * Changing it would alter sizeof(OH_NN_FuseType) and hence the ABI, so it
 * is left untouched — confirm the intended consumers before modifying.
 *
 * @since 9
 * @version 1.0
 */
typedef enum : int8_t {
    /** The fusion activation function is not specified. */
    OH_NN_FUSED_NONE = 0,
    /** Fusion relu activation function */
    OH_NN_FUSED_RELU = 1,
    /** Fusion relu6 activation function */
    OH_NN_FUSED_RELU6 = 2
} OH_NN_FuseType;
147 
/**
 * @brief Defines the layout type of tensor data.
 *
 * @since 9
 * @version 1.0
 */
typedef enum {
    /** The tensor does not have a specific layout type (such as scalar or vector). */
    OH_NN_FORMAT_NONE = 0,
    /** The tensor arranges data in NCHW (batch, channel, height, width) format. */
    OH_NN_FORMAT_NCHW = 1,
    /** The tensor arranges data in NHWC (batch, height, width, channel) format. */
    OH_NN_FORMAT_NHWC = 2
} OH_NN_Format;
162 
/**
 * @brief Defines device types supported by Neural Network Runtime.
 *
 * @since 9
 * @version 1.0
 */
typedef enum {
    /** Devices that are not CPU, GPU, or dedicated accelerator */
    OH_NN_OTHERS = 0,
    /** CPU device */
    OH_NN_CPU = 1,
    /** GPU device */
    OH_NN_GPU = 2,
    /* Trailing comma removed: every other enum in this header omits it, and
     * a trailing enumerator comma is invalid under strict C89/C++98. */
    /** Dedicated hardware accelerator */
    OH_NN_ACCELERATOR = 3
} OH_NN_DeviceType;
179 
/**
 * @brief Defines tensor data types supported by Neural Network Runtime.
 *
 * @since 9
 * @version 1.0
 */
typedef enum {
    /** Unknown type */
    OH_NN_UNKNOWN = 0,
    /** bool */
    OH_NN_BOOL = 1,
    /** int8 */
    OH_NN_INT8 = 2,
    /** int16 */
    OH_NN_INT16 = 3,
    /** int32 */
    OH_NN_INT32 = 4,
    /** int64 */
    OH_NN_INT64 = 5,
    /** uint8 */
    OH_NN_UINT8 = 6,
    /** uint16 */
    OH_NN_UINT16 = 7,
    /** uint32 */
    OH_NN_UINT32 = 8,
    /** uint64 */
    OH_NN_UINT64 = 9,
    /** float16 (half precision) */
    OH_NN_FLOAT16 = 10,
    /** float32 (single precision) */
    OH_NN_FLOAT32 = 11,
    /** float64 (double precision) */
    OH_NN_FLOAT64 = 12
} OH_NN_DataType;
214 
215 
216 /**
217  * @brief Defines operator types supported by Neural Network Runtime.
218  *
219  * @since 9
220  * @version 1.0
221  */
222 typedef enum {
223     /**
224      * Returns the tensor of the sum of the elements corresponding to two input tensors.
225      *
226      * Inputs:
227      *
228      * * <b>input1</b>: first input tensor, of the Boolean or number type.
229      * * <b>input2</b>: second input tensor, whose data type must be the same as that of the first tensor.
230      *
231      * Parameters:
232      *
233      * * <b>activationType</b> is an integer constant which is contained in <b>FuseType</b>.
234      *       The specified activation function is called before output.
235      *
236      * Outputs:
237      *
238      * * <b>output</b>: sum of <b>input1</b> and <b>input2</b>.
239      *       The data shape is the same as that of the input after broadcasting,
240      *       and the data type is the same as that of the input with a higher precision.
241      */
242     OH_NN_OPS_ADD = 1,
243 
244     /**
245      * Apply 2D average pooling to the input tensor, which now must be in NHWC format. The int8 quantization input is supported.
246      *
247      * If the input contains the <b>padMode</b> parameter:
248      *
249      * Inputs:
250      *
251      * * <b>input</b>: tensor.
252      *
253      * Parameters:
254      *
255      * * <b>kernelSize</b> indicates the kernel size used to obtain the average value. It is an int array [kernel_height, kernel_width].
256      *       The first number indicates the kernel height, and the second number indicates the kernel width.
257      * * <b>strides</b> indicates the distance of kernel moving. The value is an int array [stride_height, stride_width].
258      *       The first number indicates the moving step in height, and the second number indicates the moving step in width.
259      * * <b>padMode</b>: padding mode, which is optional. The value is of the int type and can be <b>0</b> (same) or <b>1</b> (valid).
260      *       The nearest neighbor value is used for padding.
261      *       <b>0</b> (same): The height and width of the output are the same as those of the input.
262      *       The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and right if possible.
263      *       Otherwise, the last additional padding will be completed from the bottom and right.
264      *       <b>1</b> (valid): The possible maximum height and width of the output will be returned in case of no padding. Excessive pixels will be discarded.
265      * * <b>activationType</b> is an integer constant which is contained in <b>FuseType</b>.
266      *       The specified activation function is called before output.
267      *
268      * If the input contains the <b>padList</b> parameter:
269      *
270      * Inputs:
271      *
272      * * <b>input</b>: tensor.
273      *
274      * Parameters:
275      *
276      * * <b>kernelSize</b> indicates the kernel size used to obtain the average value. It is an int array [kernel_height, kernel_width].
277      *       The first number indicates the kernel height, and the second number indicates the kernel width.
278      * * <b>strides</b> indicates the distance of kernel moving. The value is an int array [stride_height, stride_width].
279      *       The first number indicates the moving step in height, and the second number indicates the moving step in width.
280      * * <b>padList</b>: padding around <b>input</b>. It is an int array [top, bottom, left, right], and the nearest neighbor values are used for padding.
281      * * <b>activationType</b> is an integer constant which is contained in <b>FuseType</b>.
282      *       The specified activation function is called before output.
283      *
284      * Outputs:
285      *
286      * * <b>output</b>: average pooling result of the input.
287      */
288     OH_NN_OPS_AVG_POOL = 2,
289 
290     /**
291      * Batch normalization is performed on a tensor to scale and shift tensor elements, relieving potential covariate shift in a batch of data.
292      *
293      * Inputs:
294      *
295      * * <b>input</b>: <i>n</i>-dimensional tensor of shape [N, ..., C]. The <i>n</i>th dimension is the number of channels.
296      * * <b>scale</b>: 1D tensor of the scaling factor used to scale the first normalized tensor.
297      * * <b>offset</b>: 1D tensor used to move to the first normalized tensor.
298      * * <b>mean</b>: 1D tensor of the overall mean value. It is used only for inference. In case of training, this parameter must be left empty.
299      * * <b>variance</b>: 1D tensor used for the overall variance. It is used only for inference. In case of training, this parameter must be left empty.
300      *
301      * Parameters:
302      *
303      * * <b>epsilon</b>: fixed small additional value.
304      *
305      * Outputs:
306      *
307      * * <b>output</b>: <i>n</i>-dimensional output tensor whose shape and data type are the same as those of the input.
308      */
309     OH_NN_OPS_BATCH_NORM = 3,
310 
311     /**
312      * Divides the batch dimension of a 4D tensor into small blocks by <b>block_shape</b>, and interleaves these blocks back into the spatial dimension.
313      *
314      * Parameters:
315      *
316      * * <b>input</b>: input tensor. The dimension will be divided into small blocks, and these blocks will be interleaved into the spatial dimension.
317      *
318      * Outputs:
319      *
320      * * <b>blockSize</b>: size of each block to be interleaved into the spatial dimension. The value is an array [height_block, width_block].
321      * * <b>crops</b>: elements truncated from the spatial dimension of the output. The value is a 2D array [[crop0_start, crop0_end],
322      *       [crop1_start, crop1_end]] with the shape of (2, 2).
323      *
324      *
325      * Outputs:
326      *
327      * * <b>output</b>. Assume that the shape of <b>input</b> is (n,h,w,c) and the shape of <b>output</b> is (n',h',w',c'):
328      *       n' = n / (block_shape[0] * block_shape[1])
329      *       h' = h * block_shape[0] - crops[0][0] - crops[0][1]
330      *       w' = w * block_shape[1] - crops[1][0] - crops[1][1]
331      *       c'= c
332      */
333     OH_NN_OPS_BATCH_TO_SPACE_ND = 4,
334 
335     /**
336      * Offsets the data in each dimension of the input tensor.
337      *
338      * Inputs:
339      *
340      * * <b>input</b>: input tensor, which can have two to five dimensions.
341      * * <b>bias</b>: offset of the number of input dimensions.
342      *
343      * Outputs:
344      *
345      * * <b>output</b>: sum of the input tensor and the bias in each dimension.
346      */
347     OH_NN_OPS_BIAS_ADD = 5,
348 
349     /**
350      * Converts the data type in the input tensor.
351      *
352      * Inputs:
353      *
354      * * <b>input</b>: input tensor.
355      * * <b>type</b>: converted data type.
356      *
357      * Outputs:
358      *
359      * * <b>output</b>: converted tensor.
360      */
361     OH_NN_OPS_CAST = 6,
362 
363     /**
364      * Connects tensors in a specified dimension.
365      *
366      * Inputs:
367      *
368      * * <b>input</b>: <i>N</i> input tensors.
369      *
370      * Parameters:
371      *
372      * * <b>axis</b>: dimension for connecting tensors.
373      *
374      * Outputs:
375      *
376      * * <b>output</b>: result of connecting <i>N</i> tensors along the axis.
377      */
378     OH_NN_OPS_CONCAT = 7,
379 
380     /**
381      * 2D convolutional layer.
382      *
383      * If the input contains the <b>padMode</b> parameter:
384      *
385      * Inputs:
386      *
387      * * <b>input</b>: input tensor.
388      * * <b>weight</b>: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
389      *       The value of <b>inChannel</b> must be exactly divided by the value of <b>group</b>.
390      *
391      * * <b>bias</b>: bias of the convolution. It is an array with a length of <b>[outChannel]</b>.
392      *       In quantization scenarios, the <b>bias</b> parameter does not require quantization parameters.
393      *       The quantization version requires data input of the <b>OH_NN_INT32</b> type.
394      *       The actual quantization parameters are determined by <b>input</b> and <b>weight</b>.
395      *
396      * Parameters:
397      *
398      * * <b>stride</b>: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth].
399      * * <b>dilation</b>: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth].
400      *       The value must be greater than or equal to <b>1</b> and cannot exceed the height and width of <b>input</b>.
401      *
402      * * <b>padMode</b>: padding mode of <b>input</b>. The value is of the int type and can be <b>0</b> (same) or <b>1</b> (valid).
403      *       <b>0</b> (same): The height and width of the output are the same as those of the input.
404      *       The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and right if possible.
405      *       Otherwise, the last additional padding will be completed from the bottom and right.
406      *
407      *       <b>1</b> (valid): The possible maximum height and width of the output will be returned in case of no padding. The excessive pixels will be discarded.
408      * * <b>group</b>: number of groups in which the input is divided by <b>in_channel</b>. The value is of the int type.
409      *       If <b>group</b> is <b>1</b>, it is a conventional convolution. If <b>group</b> is greater than <b>1</b> and
410      *       less than or equal to <b>in_channel</b>, it is a group convolution.
411      * * <b>activationType</b> is an integer constant which is contained in <b>FuseType</b>. The specified activation function is called before output.
412      *
413      * If the input contains the <b>padList</b> parameter:
414      *
415      * Inputs:
416      *
417      * * <b>input</b>: input tensor.
418      * * <b>weight</b>: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
419      *       The value of <b>inChannel</b> must be exactly divided by the value of <b>group</b>.
420      *
421      * * <b>bias</b>: bias of the convolution. It is an array with a length of <b>[outChannel]</b>.
422      *       In quantization scenarios, the <b>bias</b> parameter does not require quantization parameters.
423      *       The quantization version requires data input of the <b>OH_NN_INT32</b> type.
424      *       The actual quantization parameters are determined by <b>input</b> and <b>weight</b>.
425      *
426      * Parameters:
427      *
428      * * <b>stride</b>: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth].
429      * * <b>dilation</b>: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth].
430      *       The value must be greater than or equal to <b>1</b> and cannot exceed the height and width of <b>input</b>.
431      * * <b>padList</b>: padding around <b>input</b>. It is an int array [top, bottom, left, right].
432      * * <b>group</b>: number of groups in which the input is divided by <b>in_channel</b>. The value is of the int type.
433      *       If <b>group</b> is <b>1</b>, it is a conventional convolution.
434      *       If <b>group</b> is <b>in_channel</b>, it is depthwiseConv2d. In this case, group==in_channel==out_channel.
435      *       If <b>group</b> is greater than <b>1</b> and less than <b>in_channel</b>, it is a group convolution. In this case, out_channel==group.
436      * * <b>activationType</b> is an integer constant which is contained in <b>FuseType</b>.
437      *       The specified activation function is called before output.
438      *
439      * Outputs:
440      *
441      * * <b>output</b>: convolution computing result.
442      */
443     OH_NN_OPS_CONV2D = 8,
444 
445     /**
446      * 2D convolution transposition.
447      *
448      * If the input contains the <b>padMode</b> parameter:
449      *
450      * Inputs:
451      *
452      * * <b>input</b>: input tensor.
453      * * <b>weight</b>: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
454      *       The value of <b>inChannel</b> must be exactly divided by the value of <b>group</b>.
455      *
456      * * <b>bias</b>: bias of the convolution. It is an array with a length of <b>[outChannel]</b>.
457      *       In quantization scenarios, the <b>bias</b> parameter does not require quantization parameters.
458      *       The quantization version requires data input of the <b>OH_NN_INT32</b> type.
459      *       The actual quantization parameters are determined by <b>input</b> and <b>weight</b>.
460      *
461      * * <b>stride</b>: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth].
462      *
463      * Parameters:
464      *
465      * * <b>dilation</b>: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth].
466      *       The value must be greater than or equal to <b>1</b> and cannot exceed the height and width of <b>input</b>.
467      * * <b>padMode</b>: padding mode of <b>input</b>. The value is of the int type and can be <b>0</b> (same) or <b>1</b> (valid).
468      *       <b>0</b> (same): The height and width of the output are the same as those of the input.
469      *       The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and right if possible.
470      *       Otherwise, the last additional padding will be completed from the bottom and right.
471      *       <b>1</b> (valid): The possible maximum height and width of the output will be returned in case of no padding. The excessive pixels will be discarded.
472      * * <b>group</b>: number of groups in which the input is divided by <b>in_channel</b>. The value is of the int type.
473      *       If <b>group</b> is <b>1</b>, it is a conventional convolution. If <b>group</b> is greater than <b>1</b> and
474      *       less than or equal to <b>in_channel</b>, it is a group convolution.
475      * * <b>outputPads</b>: padding along the height and width of the output tensor. The value is an int or a tuple.
476      *       It can be a single integer to specify the same value for all spatial dimensions. The amount of output
477      *       padding along a dimension must be less than the stride along this dimension.
478      *
479      * * <b>activationType</b> is an integer constant which is contained in <b>FuseType</b>.
480      *       The specified activation function is called before output.
481      *
482      * If the input contains the <b>padList</b> parameter:
483      *
484      * Inputs:
485      *
486      * * <b>input</b>: input tensor.
487      * * <b>weight</b>: convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
488      *       The value of <b>inChannel</b> must be exactly divided by the value of <b>group</b>.
489      * * <b>bias</b>: bias of the convolution. It is an array with a length of <b>[outChannel]</b>.
490      *       In quantization scenarios, the <b>bias</b> parameter does not require quantization parameters.
491      *       The quantization version requires data input of the <b>OH_NN_INT32</b> type.
492      *       The actual quantization parameters are determined by <b>input</b> and <b>weight</b>.
493      *
494      * Parameters:
495      *
496      * * <b>stride</b>: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth].
497      * * <b>dilation</b>: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth].
498      *       The value must be greater than or equal to <b>1</b> and cannot exceed the height and width of <b>input</b>.
499      * * <b>padList</b>: padding around <b>input</b>. It is an int array [top, bottom, left, right].
500      * * <b>group</b>: number of groups in which the input is divided by <b>in_channel</b>. The value is of the int type.
501      *       If <b>group</b> is <b>1</b>, it is a conventional convolution. If <b>group</b> is greater than <b>1</b>
502      *       and less than or equal to <b>in_channel</b>, it is a group convolution.
503      * * <b>outputPads</b>: padding along the height and width of the output tensor. The value is an int or a tuple.
504      *       It can be a single integer to specify the same value for all spatial dimensions. The amount of output padding
505      *       along a dimension must be less than the stride along this dimension.
506      *
507      * * <b>activationType</b> is an integer constant which is contained in <b>FuseType</b>.
508      *       The specified activation function is called before output.
509      *
510      * Outputs:
511      *
512      * * <b>output</b>: computing result after convolution and transposition.
513      */
514     OH_NN_OPS_CONV2D_TRANSPOSE = 9,
515 
516     /**
517      * 2D depthwise separable convolution.
518      *
519      * If the input contains the <b>padMode</b> parameter:
520      *
521      * Inputs:
522      *
523      * * <b>input</b>: input tensor.
524      * * <b>weight</b>: convolution weight in [outChannel, kernelHeight, kernelWidth, 1] format.
525      *       <b>outChannel</b> is equal to <b>channelMultiplier</b> multiplied by <b>inChannel</b>.
526      * * <b>bias</b>: bias of the convolution. It is an array with a length of <b>[outChannel]</b>.
527      *       In quantization scenarios, the <b>bias</b> parameter does not require quantization parameters.
528      *       The quantization version requires data input of the <b>OH_NN_INT32</b> type.
529      *       The actual quantization parameters are determined by <b>input</b> and <b>weight</b>.
530      *
531      * Parameters:
532      *
533      * * <b>stride</b>: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth].
534      * * <b>dilation</b>: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth].
535      *       The value must be greater than or equal to <b>1</b> and cannot exceed the height and width of <b>input</b>.
536      * * <b>padMode</b>: padding mode of <b>input</b>. The value is of the int type and can be <b>0</b> (same) or <b>1</b> (valid).
537      *       <b>0</b> (same): The height and width of the output are the same as those of the input.
538      *       The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and right if possible.
539      *       Otherwise, the last additional padding will be completed from the bottom and right.
540      *
541      *       <b>1</b> (valid): The possible maximum height and width of the output will be returned in case of no padding. The excessive pixels will be discarded.
542      * * <b>activationType</b> is an integer constant which is contained in <b>FuseType</b>.
543      *       The specified activation function is called before output.
544      *
545      * If the input contains the <b>padList</b> parameter:
546      *
547      * Inputs:
548      *
549      * * <b>input</b>: input tensor.
550      * * <b>weight</b>: convolution weight in [outChannel, kernelHeight, kernelWidth, 1] format.
551      *       <b>outChannel</b> is equal to <b>channelMultiplier</b> multiplied by <b>inChannel</b>.
552      * * <b>bias</b>: bias of the convolution. It is an array with a length of <b>[outChannel]</b>.
553      *       In quantization scenarios, the <b>bias</b> parameter does not require quantization parameters.
554      *       The quantization version requires data input of the <b>OH_NN_INT32</b> type.
555      *       The actual quantization parameters are determined by <b>input</b> and <b>weight</b>.
556      *
557      * Parameters:
558      *
559      * * <b>stride</b>: movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth].
560      * * <b>dilation</b>: dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth].
561      *       The value must be greater than or equal to <b>1</b> and cannot exceed the height and width of <b>input</b>.
562      * * <b>padList</b>: padding around <b>input</b>. It is an int array [top, bottom, left, right].
563      * * <b>activationType</b> is an integer constant which is contained in <b>FuseType</b>.
564      *       The specified activation function is called before output.
565      *
566      * Outputs:
567      *
568      * * <b>output</b>: convolution computing result.
569      */
570     OH_NN_OPS_DEPTHWISE_CONV2D_NATIVE = 10,
571 
572     /**
573      * Divides two input scalars or tensors.
574      *
575      * Inputs:
576      *
577      * * <b>input1</b>: first input, which is a number, a bool, or a tensor whose data type is number or Boolean.
578      * * <b>input2</b>: second input, which must meet the following requirements:
579      *       If the first input is a tensor, the second input can be a real number, a Boolean value, or a tensor whose data type is real number or Boolean value.
580      *       If the first input is a real number or Boolean value, the second input must be a tensor whose data type is real number or Boolean value.
581      *
582      * Parameters:
583      *
584      * * <b>activationType</b> is an integer constant which is contained in <b>FuseType</b>.
585      *       The specified activation function is called before output.
586      *
587      * Outputs:
588      *
589      * * <b>output</b>: result of dividing <b>input1</b> by <b>input2</b>.
590      */
591     OH_NN_OPS_DIV = 11,
592 
593     /**
594      * Sets parameters to perform product (dot product), sum (addition and subtraction), or max (larger value) on the input.
595      *
596      * Inputs:
597      *
598      * * <b>input1</b>: first input tensor.
599      * * <b>input2</b>: second input tensor.
600      *
601      * Parameters:
602      *
603      * * <b>mode</b>: operation mode. The value is an enumerated value.
604      *
605      * Outputs:
606      *
607      * * <b>output</b>: computing result, which has the same data type and shape of <b>output</b> and <b>input1</b>.
608      */
609     OH_NN_OPS_ELTWISE = 12,
610 
611     /**
612      * Adds an additional dimension to a tensor in the given dimension.
613      *
614      * Inputs:
615      *
616      * * <b>input</b>: input tensor.
617      * * <b>axis</b>: index of the dimension to be added. The value is of the int32_t type and must be a constant in the range [-dim-1, dim].
618      *
619      * Outputs:
620      *
621      * * <b>output</b>: tensor after dimension expansion.
622      */
623     OH_NN_OPS_EXPAND_DIMS = 13,
624 
625     /**
626      * Creates a tensor of the specified dimensions and fills it with a scalar.
627      *
628      * Inputs:
629      *
630      * * <b>value</b>: scalar used to fill the tensor.
631      * * <b>shape</b>: dimensions of the tensor to be created.
632      *
633      * Outputs:
634      *
635      * * <b>output</b>: generated tensor, which has the same data type as <b>value</b>. The tensor shape is specified by the <b>shape</b> parameter.
636      */
637     OH_NN_OPS_FILL = 14,
638 
639     /**
640      * Full connection. The entire input is used as the feature map for feature extraction.
641      *
642      * Inputs:
643      *
644      * * <b>input</b>: full-connection input tensor.
645      * * <b>weight</b>: weight tensor for a full connection.
646      * * <b>bias</b>: full-connection bias. In quantization scenarios, no quantized parameter is required for this parameter.
647      *       If quantization is required, the data must be of the OH_NN_INT32 type.
648      *       The actual quantization parameters are determined by <b>input</b> and <b>weight</b>.
649      *
650      * Parameters:
651      *
652      * * <b>activationType</b> is an integer constant which is contained in <b>FuseType</b>.
653      *       The specified activation function is called before output.
654      *
655      * Outputs:
656      *
657      * * <b>output</b>: computed tensor.
658      *
659      * If the input contains the <b>axis</b> parameter:
660      *
661      * Inputs:
662      *
663      * * <b>input</b>: full-connection input tensor.
664      * * <b>weight</b>: weight tensor for a full connection.
665      * * <b>bias</b>: full-connection bias. In quantization scenarios, no quantized parameter is required for this parameter.
666      *       If quantization is required, the data must be of the OH_NN_INT32 type. The actual quantization parameters
667      *       are determined by <b>input</b> and <b>weight</b>.
668      *
669      * Parameters:
670      *
671      * * <b>axis</b>: axis in which the full connection is applied. The specified axis and its following axes are
672      *       converted into a 1D tensor for applying the full connection.
673      * * <b>activationType</b> is an integer constant which is contained in <b>FuseType</b>.
674      *       The specified activation function is called before output.
675      *
676      * Outputs:
677      *
678      * * <b>output</b>: computed tensor.
679      */
680     OH_NN_OPS_FULL_CONNECTION = 15,
681 
682     /**
683      * Returns the slice of the input tensor based on the specified index and axis.
684      *
685      * Inputs:
686      *
687      * * <b>input</b>: tensor to be sliced.
688      * * <b>inputIndices</b>: indices of the specified input on the axis. The value is an array of the int type
689      *       and must be in the range [0,input.shape[axis]).
690      * * <b>axis</b>: axis on which <b>input</b> is sliced. The value is an array with one element of the int32_t type.
691      *
692      * Outputs:
693      *
694      * * <b>output</b>: sliced tensor.
695      */
696     OH_NN_OPS_GATHER = 16,
697 
698     /**
699      * Calculate the <b>Hswish</b> activation value of the input.
700      *
701      * Inputs:
702      *
703      * * An <i>n</i>-dimensional input tensor.
704      *
705      * Outputs:
706      *
707      * * <b>output</b>: <i>n</i>-dimensional <b>Hswish</b> activation value. The data type is the same as that of <b>input</b>.
708      */
709     OH_NN_OPS_HSWISH = 17,
710 
711     /**
712      * For <b>input1</b> and <b>input2</b>, calculate the result of input1[i]<=input2[i] for each pair of elements,
713      * where i is the index of each element in the input tensor.
714      *
715      * Inputs:
716      *
717      * * <b>input1</b>, which can be a real number, Boolean value, or tensor whose data type is real number or NN_BOOL.
718      * * <b>input2</b>, which can be a real number or a Boolean value if <b>input1</b> is a tensor and must be a tensor
719      *       with the data type of real number or NN_BOOL if <b>input1</b> is not a tensor.
720      *
721      * Outputs:
722      *
723      * * A tensor of the data type NN_BOOL. When a quantization model is used, the quantization parameters of the output
724      *   cannot be omitted. However, values of the quantization parameters do not affect the result.
725      */
726     OH_NN_OPS_LESS_EQUAL = 18,
727 
728     /**
729      * Calculate the inner product of <b>input1</b> and <b>input2</b>.
730      *
731      * Inputs:
732      *
733      * * <b>input1</b>: <i>n</i>-dimensional input tensor.
734      * * <b>input2</b>: <i>n</i>-dimensional input tensor.
735      *
736      * Parameters:
737      *
738      * * <b>TransposeX</b>: Boolean value indicating whether to transpose <b>input1</b>.
739      * * <b>TransposeY</b>: Boolean value indicating whether to transpose <b>input2</b>.
740      *
741      * Outputs:
742      *
743      * * <b>output</b>: inner product obtained after calculation. In case of type!=NN_UNKNOWN, the output data type is
744      *       determined by <b>type</b>. In case of type==NN_UNKNOWN, the output data type depends on the data type
745      *       converted during computing of <b>input1</b> and <b>input2</b>.
746      *
747      */
748     OH_NN_OPS_MATMUL = 19,
749 
750     /**
751      * Calculates the maximum of <b>input1</b> and <b>input2</b> element-wise. The inputs of <b>input1</b> and <b>input2</b>
752      * comply with the implicit type conversion rules to make the data types consistent. The inputs must be two tensors or one tensor and one scalar.
753      * When the inputs are two tensors, their data types cannot be both NN_BOOL. Their shapes can be broadcast to the same size.
754      * When the inputs are one tensor and one scalar, the scalar must be a constant.
755      *
756      * Inputs:
757      *
758      * * <b>input1</b>: <i>n</i>-dimensional input tensor of the real number or NN_BOOL type.
759      * * <b>input2</b>: <i>n</i>-dimensional input tensor of the real number or NN_BOOL type.
760      *
761      * Outputs:
762      *
763      * * <b>output</b>: <i>n</i>-dimensional output tensor. The <b>shape</b> and data type of
764      *       <b>output</b> are the same as those of the two inputs with a higher precision.
765      */
766     OH_NN_OPS_MAXIMUM = 20,
767 
768     /**
769      * Applies 2D maximum pooling to the input tensor.
770      *
771      * If the input contains the <b>padMode</b> parameter:
772      *
773      * Inputs:
774      *
775      * * <b>input</b>: tensor.
776      *
777      * Parameters:
778      *
779      * * <b>kernelSize</b>: kernel size used to obtain the maximum. It is an int array [kernel_height, kernel_width].
780      *       The first number indicates the kernel height, and the second number indicates the kernel width.
781      * * <b>strides</b> indicates the distance of kernel moving. The value is an int array [stride_height, stride_width].
782      *       The first number indicates the moving step in height, and the second number indicates the moving step in width.
783      * * <b>padMode</b>: padding mode, which is optional. The value is of the int type and can be <b>0</b> (same)
784      *       or <b>1</b> (valid). The nearest neighbor value is used for padding.
785      *       <b>0</b> (same): The height and width of the output are the same as those of the input.
786      *       The total padding quantity is calculated horizontally and vertically and evenly distributed to the top, bottom, left, and right if possible.
787      *       Otherwise, the last additional padding will be completed from the bottom and right.
788      *       <b>1</b> (valid): The possible maximum height and width of the output will be returned in case of no padding. The excessive pixels will be discarded.
789      * * <b>activationType</b> is an integer constant which is contained in <b>FuseType</b>.
790      *       The specified activation function is called before output.
791      *
792      * If the input contains the <b>padList</b> parameter:
793      *
794      * Inputs:
795      *
796      * * <b>input</b>: tensor.
797      *
798      * Parameters:
799      *
800      * * <b>kernelSize</b>: kernel size used to obtain the maximum. It is an int array [kernel_height, kernel_width].
801      *       The first number indicates the kernel height, and the second number indicates the kernel width.
802      * * <b>strides</b> indicates the distance of kernel moving. The value is an int array [stride_height, stride_width].
803      *       The first number indicates the moving step in height, and the second number indicates the moving step in width.
804      * * <b>padList</b>: padding around <b>input</b>. It is an int array [top, bottom, left, right],
805      *       and the nearest neighbor values are used for padding.
806      * * <b>activationType</b> is an integer constant which is contained in <b>FuseType</b>.
807      *       The specified activation function is called before output.
808      *
809      * Outputs:
810      *
811      * * <b>output</b>: tensor obtained after maximum pooling is applied to the input.
812      */
813     OH_NN_OPS_MAX_POOL = 21,
814 
815     /**
816      * Multiplies elements in the same positions of <b>inputX</b> and <b>inputY</b> to obtain the output.
817      * If <b>inputX</b> and <b>inputY</b> have different shapes, expand them to the same shape
818      * through broadcast and then perform multiplication.
819      *
820      * Inputs:
821      *
822      * * <b>input1</b>: <i>n</i>-dimensional tensor.
823      * * <b>input2</b>: <i>n</i>-dimensional tensor.
824      *
825      * Parameters:
826      *
827      * * <b>activationType</b> is an integer constant which is contained in <b>FuseType</b>.
828      *       The specified activation function is called before output.
829      *
830      * Outputs:
831      *
832      * * Product of each element of <b>input1</b> and <b>input2</b>.
833      */
834     OH_NN_OPS_MUL = 22,
835 
836     /**
837      * Generates a one-hot tensor based on the positions specified by <b>indices</b>. The positions specified by <b>indices</b>
838      * are determined by <b>on_value</b>, and other positions are determined by <b>off_value</b>.
839      *
840      * Inputs:
841      *
842      * * <b>indices</b>: <i>n</i>-dimensional tensor. Each element in <b>indices</b> determines the position of
843      *       <b>on_value</b> in each one-hot vector.
844      * * <b>depth</b>: integer scalar that determines the depth of the one-hot vector. The value of <b>depth</b>
845      *       must be greater than <b>0</b>.
846      * * <b>on_value</b>: scalar that specifies a valid value in the one-hot vector.
847      * * <b>off_value</b>: scalar that specifies the values of other positions in the one-hot vector except the valid value.
848      *
849      * Parameters:
850      *
851      * * <b>axis</b>: integer scalar that specifies the dimension for inserting the one-hot. Assume that the shape
852      *       of <b>indices</b> is [N, C], and the value of <b>depth</b> is D.
853      *       When <b>axis</b> is <b>0</b>, the shape of the output is [D, N, C].
854      *       When <b>axis</b> is <b>-1</b>, the shape of the output is [N, C, D].
855      *       When <b>axis</b> is <b>1</b>, the shape of the output is [N, D, C].
856      *
857      * Outputs:
858      *
859      * * <b>output</b>: (<i>n</i>+1)-dimensional tensor if <b>indices</b> is an <i>n</i>-dimensional tensor.
860      *       The output shape is determined by <b>indices</b> and <b>axis</b>.
861      */
862     OH_NN_OPS_ONE_HOT = 23,
863 
864     /**
865      * Pads <b>inputX</b> in the specified dimensions.
866      *
867      * Inputs:
868      *
869      * * <b>inputX</b>: <i>n</i>-dimensional tensor in [BatchSize, ...] format.
870      * * <b>paddings</b>: 2D tensor that specifies the length to pad in each dimension. The shape is [n, 2].
871      *       For example, <b>paddings[i][0]</b> indicates the number of paddings to be added preceding <b>inputX</b> in the <i>i</i>th dimension.
872      *       <b>paddings[i][1]</b> indicates the number of paddings to be added following <b>inputX</b> in the <i>i</i>th dimension.
873      *
874      * Parameters:
875      *
876      * * <b>padValues</b>: value to be added to the pad operation. The value is a constant with the same data type as <b>inputX</b>.
877      *
878      * Outputs:
879      *
880      * * <b>output</b>: <i>n</i>-dimensional tensor after padding, with the same dimensions and data type as <b>inputX</b>.
881      *       The shape is determined by <b>inputX</b> and <b>paddings</b>.
882      *       output.shape[i] = input.shape[i] + paddings[i][0]+paddings[i][1]
883      */
884     OH_NN_OPS_PAD = 24,
885 
886     /**
887      * Calculates the <b>y</b> power of each element in <b>input</b>. The inputs must be two tensors or one tensor and one scalar.
888      * When the inputs are two tensors, their data types cannot be both NN_BOOL, and their shapes must be the same.
889      * When the inputs are one tensor and one scalar, the scalar must be a constant.
890      *
891      * Inputs:
892      *
893      * * <b>input</b>: real number, Boolean value, or tensor whose data type is real number or NN_BOOL.
894      * * <b>y</b>: real number, Boolean value, or tensor whose data type is real number or NN_BOOL.
895      *
896      * Outputs:
897      *
898      * * <b>output</b>: tensor, whose shape is determined by the shape of <b>input</b> and <b>y</b> after broadcasting.
899      */
900     OH_NN_OPS_POW = 25,
901 
902     /**
903      * Scales a tensor.
904      *
905      * Inputs:
906      *
907      * * <b>input</b>: <i>n</i>-dimensional tensor.
908      * * <b>scale</b>: scaling tensor.
909      * * <b>bias</b>: bias tensor.
910      *
911      * Parameters:
912      *
913      * * <b>axis</b>: dimensions to be scaled.
914      * * <b>activationType</b> is an integer constant which is contained in <b>FuseType</b>.
915      *       The specified activation function is called before output.
916      *
917      * Outputs:
918      *
919      * * <b>output</b>: scaled <i>n</i>-dimensional tensor, whose data type is the same as that of <b>input</b> and
920      *       shape is determined by <b>axis</b>.
921      */
922     OH_NN_OPS_SCALE = 26,
923 
924     /**
925      * Calculates the shape of the input tensor.
926      *
927      * Inputs:
928      *
929      * * <b>input</b>: <i>n</i>-dimensional tensor.
930      *
931      * Outputs:
932      *
933      * * <b>output</b>: integer array representing the dimensions of the input tensor.
934      */
935     OH_NN_OPS_SHAPE = 27,
936 
937     /**
938      * Applies the <b>sigmoid</b> operation to the input tensor.
939      *
940      * Inputs:
941      *
942      * * <b>input</b>: <i>n</i>-dimensional tensor.
943      *
944      * Outputs:
945      *
946      * * <b>output</b>: result of the <b>sigmoid</b> operation. It is an <i>n</i>-dimensional tensor
947      *       with the same data type and shape as <b>input</b>.
948      */
949     OH_NN_OPS_SIGMOID = 28,
950 
951     /**
952      * Slices a tensor of the specified size from the input in each dimension.
953      *
954      * Inputs:
955      *
956      * * <b>input</b>: <i>n</i>-dimensional input tensor.
957      * * <b>begin</b>: start of the slice, which is an array of integers greater than or equal to 0.
958      * * <b>size</b>: slice length, which is an array of integers greater than or equal to 0.
959      *       Assume that a dimension is <b>i</b> and 1<=size[i]<=input.shape[i]-begin[i].
960      *
961      * Outputs:
962      *
963      * * <b>output</b>: <i>n</i>-dimensional tensor obtained by slicing.
964      *       The <b>TensorType</b>, shape, and size of the output are the same as those of the input.
965      */
966     OH_NN_OPS_SLICE = 29,
967 
968     /**
969      * Applies the <b>softmax</b> operation to the input tensor.
970      *
971      * Inputs:
972      *
973      * * <b>input</b>: <i>n</i>-dimensional input tensor.
974      *
975      * Parameters:
976      *
977      * * <b>axis</b>: dimension in which the <b>softmax</b> operation is performed.
978      *       The value is of the int64 type. It is an integer in the range [-n, n).
979      *
980      * Outputs:
981      *
982      * * <b>output</b>: result of the <b>softmax</b> operation. It is an <i>n</i>-dimensional tensor with
983      *       the same data type and shape as <b>input</b>.
984      */
985     OH_NN_OPS_SOFTMAX = 30,
986 
987     /**
988      * Divides a 4D tensor into small blocks and combines these blocks in the original batch.
989      * The number of blocks is <b>blockShape[0]</b> multiplied by <b>blockShape[1]</b>.
990      *
991      * Inputs:
992      *
993      * * <b>input</b>: 4D tensor.
994      *
995      * Parameters:
996      *
997      * * <b>blockShape</b>: a pair of integers. Each of them is greater than or equal to <b>1</b>.
998      * * <b>paddings</b>: a pair of arrays. Each of them consists of two integers. The four integers that form <b>paddings</b>
999      *       must be greater than or equal to <b>0</b>. <b>paddings[0][0]</b> and <b>paddings[0][1]</b>
1000      *       specify the number of paddings in the third dimension, and <b>paddings[1][0]</b> and <b>paddings[1][1]</b>
1001      *       specify the number of paddings in the fourth dimension.
1002      *
1003      * Outputs:
1004      *
1005      * * <b>output</b>: 4D tensor with the same data type as <b>input</b>. The shape is determined by <b>input</b>,
1006      *       <b>blockShape</b>, and <b>paddings</b>. Assume that the input shape is [n,c,h,w], then:
1007      *       output.shape[0] = n * blockShape[0] * blockShape[1]
1008      *       output.shape[1] = c
1009      *       output.shape[2] = (h + paddings[0][0] + paddings[0][1]) / blockShape[0]
1010      *       output.shape[3] = (w + paddings[1][0] + paddings[1][1]) / blockShape[1]
1011      *       (h + paddings[0][0] + paddings[0][1]) and (w + paddings[1][0] + paddings[1][1]) must be exactly divisible by
1012      *       <b>blockShape[0]</b> and <b>blockShape[1]</b>, respectively.
1013      *
1014      */
1015     OH_NN_OPS_SPACE_TO_BATCH_ND = 31,
1016 
1017     /**
1018      * Splits the input into multiple tensors along the axis dimension. The number of tensors is specified by <b>outputNum</b>.
1019      *
1020      * Inputs:
1021      *
1022      * * <b>input</b>: <i>n</i>-dimensional tensor.
1023      *
1024      * Parameters:
1025      *
1026      * * <b>outputNum</b>: number of output tensors. The data type is long.
1027      * * <b>size_splits</b>: size of each tensor split from the input. The value is a 1D tensor of the int type.
1028      *       If <b>size_splits</b> is empty, the input will be evenly split into tensors of the same size. In this case,
1029      *       <b>input.shape[axis]</b> can be exactly divisible by <b>outputNum</b>.
1030      *       If <b>size_splits</b> is not empty, the sum of all its elements must be equal to <b>input.shape[axis]</b>.
1031      * * <b>axis</b>: splitting dimension of the int type.
1032      *
1033      * Outputs:
1034      *
1035      * * <b>outputs</b>: array of <i>n</i>-dimensional tensors, with the same data type and dimensions.
1036      *       The data type of each tensor is the same as that of <b>input</b>.
1037      */
1038     OH_NN_OPS_SPLIT = 32,
1039 
1040     /**
1041      * Calculates the square root of a tensor.
1042      *
1043      * Inputs:
1044      *
1045      * * <b>input</b>: <i>n</i>-dimensional tensor.
1046      *
1047      * Outputs:
1048      *
1049      * * <b>output</b>: square root of the input. It is an <i>n</i>-dimensional tensor with the same data type and shape as <b>input</b>.
1050      */
1051     OH_NN_OPS_SQRT = 33,
1052 
1053     /**
1054      * Calculates the square of the difference between two tensors. The <b>SquaredDifference</b> operator supports tensor and tensor subtraction.
1055      * If two tensors have different <b>TensorTypes</b>, the Sub operator converts the low-precision tensor to a high-precision one.
1056      * If two tensors have different shapes, the two tensors can be extended to tensors with the same shape through broadcast.
1057      *
1058      * Inputs:
1059      *
1060      * * <b>input1</b>: minuend, which is a tensor of the NN_FLOAT16, NN_FLOAT32, NN_INT32, or NN_BOOL type.
1061      * * <b>input2</b>: subtrahend, which is a tensor of the NN_FLOAT16, NN_FLOAT32, NN_INT32, or NN_BOOL type.
1062      *
1063      * Outputs:
1064      *
1065      * * <b>output</b>: square of the difference between two inputs. The output shape is determined
1066      *       by <b>input1</b> and <b>input2</b>. If they have the same shape, the output tensor has the same shape as them.
1067      *       If they have different shapes, perform the broadcast operation on <b>input1</b> and <b>input2</b> and perform subtraction.
1068      *       <b>TensorType</b> of the output is the same as that of the input tensor with higher precision.
1069      */
1070     OH_NN_OPS_SQUARED_DIFFERENCE = 34,
1071 
1072     /**
1073      * Removes the dimension with a length of 1 from the specified axis. The int8 quantization input is supported.
1074      * Assume that the input shape is [2, 1, 1, 2, 2] and axis is [0,1], the output shape is [2, 1, 2, 2],
1075      * which means the dimension whose length is 1 between dimension 0 and dimension 1 is removed.
1076      *
1077      * Inputs:
1078      *
1079      * * <b>input</b>: <i>n</i>-dimensional tensor.
1080      *
1081      * Parameters:
1082      *
1083      * * <b>axis</b>: dimension to be removed. The value is of int64_t type and can be an integer in the range [-n, n) or an array.
1084      *
1085      * Outputs:
1086      *
1087      * * <b>output</b>: output tensor.
1088      */
1089     OH_NN_OPS_SQUEEZE = 35,
1090 
1091     /**
1092      * Stacks multiple tensors along the specified axis. If each tensor has <i>n</i> dimensions before stacking,
1093      * the output tensor will have <i>n</i>+1 dimensions.
1094      *
1095      * Inputs:
1096      *
1097      * * <b>input</b>: input for stacking, which can contain multiple <i>n</i>-dimensional tensors.
1098      *       Each of them must have the same shape and type.
1099      *
1100      * Parameters:
1101      *
1102      * * <b>axis</b>: dimension for tensor stacking, which is an integer. The value range is [-(n+1),(n+1)),
1103      *       which means a negative number is allowed.
1104      *
1105      * Outputs:
1106      *
1107      * * <b>output</b>: stacking result of the input along the axis dimension. The value is an <i>n</i>+1-dimensional tensor
1108      *       and has the same <b>TensorType</b> as the input.
1109      */
1110     OH_NN_OPS_STACK = 36,
1111 
1112     /**
1113      * Slices a tensor with the specified stride.
1114      *
1115      * Inputs:
1116      *
1117      * * <b>input</b>: <i>n</i>-dimensional input tensor.
1118      * * <b>begin</b>: start of slicing, which is a 1D tensor. The length of <b>begin</b> is <i>n</i>.
1119      *       <b>begin[i]</b> specifies the start of slicing in the <i>i</i>th dimension.
1120      * * <b>end</b>: end of slicing, which is a 1D tensor. The length of <b>end</b> is <i>n</i>.
1121      *       <b>end[i]</b> specifies the end of slicing in the <i>i</i>th dimension.
1122      * * <b>strides</b>: slicing stride, which is a 1D tensor. The length of <b>strides</b> is <i>n</i>.
1123      *       strides[i] specifies the stride at which the tensor is sliced in the <i>i</i>th dimension.
1124      *
1125      * Parameters:
1126      *
1127      * * <b>beginMask</b>: an integer used to mask <b>begin</b>. <b>beginMask</b> is represented in binary code.
1128      *       In case of binary(beginMask)[i]==1, for the <i>i</i>th dimension, elements are sliced from the first element
1129      *       at <b>strides[i]</b> until the end[i]-1 element.
1130      *
1131      * * <b>endMask</b>: an integer used to mask <b>end</b>. <b>endMask</b> is represented in binary code.
1132      *       In case of binary(endMask)[i]==1, elements are sliced from the element at the <b>begin[i]</b> position
1133      *       in the <i>i</i>th dimension until the tensor boundary at <b>strides[i]</b>.
1134      *
1135      * * <b>ellipsisMask</b>: integer used to mask <b>begin</b> and <b>end</b>. <b>ellipsisMask</b> is represented in binary code.
1136      *       In case of binary(ellipsisMask)[i]==1, elements are sliced from the first element at <b>strides[i]</b> in the <i>i</i>th dimension
1137      *       until the tensor boundary. Only one bit of <b>binary(ellipsisMask)</b> can be a non-zero value.
1138      *
1139      * * <b>newAxisMask</b>: new dimension, which is an integer. <b>newAxisMask</b> is represented in binary code.
1140      *       In case of binary(newAxisMask)[i]==1, a new dimension whose length is 1 is inserted into the <i>i</i>th dimension.
1141      * * <b>shrinkAxisMask</b>: shrinking dimension, which is an integer. <b>shrinkAxisMask</b> is represented in binary code.
1142      *       In the case of binary(shrinkAxisMask)[i]==1, all elements in the <i>i</i>th dimension will be discarded,
1143      *       and the length of the <i>i</i>th dimension is shrunk to <b>1</b>.
1144      *
1145      * Outputs:
1146      *
1147      * * A tensor, with the same data type as <b>input</b>. The number of dimensions of the output tensor is rank(input[0])+1.
1148      */
1149     OH_NN_OPS_STRIDED_SLICE = 37,
1150 
1151     /**
1152      * Calculates the difference between two tensors.
1153      *
1154      * Inputs:
1155      *
1156      * * <b>input1</b>: minuend, which is a tensor.
1157      * * <b>input2</b>: subtrahend, which is a tensor.
1158      *
1159      * Parameters:
1160      *
1161      * * <b>activationType</b> is an integer constant which is contained in <b>FuseType</b>.
1162      *       The specified activation function is called before output.
1163      *
1164      * Outputs:
1165      *
1166      * * <b>output</b>: difference between the two tensors. The output shape is determined by <b>input1</b> and <b>input2</b>.
1167      *       If they have the same shape, the output tensor has the same shape as them.
1168      *       If they have different shapes, perform the broadcast operation on <b>input1</b> and <b>input2</b> and perform subtraction.
1169      *       <b>TensorType</b> of the output is the same as that of the input tensor with higher precision.
1170      */
1171     OH_NN_OPS_SUB = 38,
1172 
1173     /**
1174      * Computes hyperbolic tangent of the input tensor.
1175      *
1176      * Inputs:
1177      *
1178      * * <b>input</b>: <i>n</i>-dimensional tensor.
1179      *
1180      * Outputs:
1181      *
1182      * * <b>output</b>: hyperbolic tangent of the input. The <b>TensorType</b> and tensor shape are the same as those of the input.
1183      */
1184     OH_NN_OPS_TANH = 39,
1185 
1186     /**
1187      * Copies a tensor the specified times.
1188      *
1189      * Inputs:
1190      * * <b>input</b>: <i>n</i>-dimensional tensor.
1191      * * <b>multiples</b>: number of times that the input tensor is copied in each dimension. The value is a 1D tensor.
1192      *       The length <i>m</i> is not less than the number of dimensions, that is, <i>n</i>.
1193      *
1194      * Outputs:
1195      * * An <i>m</i>-dimensional tensor whose <b>TensorType</b> is the same as that of the input. If <b>input</b> and
1196      *       <b>multiples</b> have the same length, <b>input</b> and <b>output</b> have the same number of dimensions.
1197      *       If the length of <b>multiples</b> is greater than <i>n</i>, 1 is used to fill the input dimension,
1198      *       and then the input is copied in each dimension the specified times to obtain the <i>m</i>-dimensional tensor.
1199      */
1200     OH_NN_OPS_TILE = 40,
1201 
1202     /**
1203      * Transposes data of <b>input 0</b> based on <b>permutation</b>.
1204      *
1205      * Inputs:
1206      *
1207      * * <b>input</b>: <i>n</i>-dimensional tensor to be transposed.
1208      * * <b>permutation</b>: The value is a 1D tensor whose length is the same as the number of dimensions of <b>input 0</b>.
1209      *
1210      * Outputs:
1211      *
1212      * * <b>output</b>: <i>n</i>-dimensional tensor. <b>TensorType</b> of <b>output 0</b> is the same as that of <b>input 0</b>,
1213      *       and the output shape is determined by the shape and <b>permutation</b> of <b>input 0</b>.
1214      */
1215     OH_NN_OPS_TRANSPOSE = 41,
1216 
1217     /**
1218      * Calculates the average value in the specified dimension. If <b>keepDims</b> is set to <b>false</b>, the number of dimensions
1219      * is reduced for the input; if <b>keepDims</b> is set to <b>true</b>, the number of dimensions is retained.
1220      *
1221      * Inputs:
1222      *
1223      * * <b>input</b>: <i>n</i>-dimensional input tensor, where <i>n</i> is less than 8.
1224      * * <b>axis</b>: dimension used to calculate the average value. The value is a 1D tensor. The value range of each element in <b>axis</b> is [–n, n).
1225      *
1226      * Parameters:
1227      *
1228      * * <b>keepDims</b>: indicates whether to retain the dimension. The value is a Boolean value.
1229      *
1230      * Outputs:
1231      *
1232      * * <b>output</b>: <i>m</i>-dimensional output tensor whose data type is the same as that of the input. If <b>keepDims</b> is
1233      *       <b>true</b>, m==n. If <b>keepDims</b> is <b>false</b>, m<n.
1234      */
1235     OH_NN_OPS_REDUCE_MEAN = 42,
1236 
1237     /**
1238      * The Bilinear method is used to deform the input based on the given parameters.
1239      *
1240      * Inputs:
1241      *
1242      * * <b>input</b>: 4D input tensor. Each element in the input cannot be less than 0. The input layout must be [batchSize, height, width, channels].
1243      *
1244      * Parameters:
1245      *
1246      * * <b>newHeight</b>: resized height of the 4D tensor.
1247      * * <b>newWidth</b>: resized width of the 4D tensor.
1248      * * <b>preserveAspectRatio</b>: indicates whether to maintain the height/width ratio of <b>input</b> after resizing.
1249      * * <b>coordinateTransformMode</b>: coordinate transformation method used by the resize operation. The value is an int32 integer.
1250      *       Currently, the following methods are supported:
1251      * * <b>excludeOutside</b>: an int64 integer. When its value is <b>1</b>, the sampling weight of the part that
1252      *       exceeds the boundary of <b>input</b> is set to <b>0</b>, and other weights are normalized.
1253      *
1254      * Outputs:
1255      *
1256      * * <b>output</b>: <i>n</i>-dimensional tensor, with the same shape and data type as <b>input</b>.
1257      */
1258     OH_NN_OPS_RESIZE_BILINEAR = 43,
1259 
1260     /**
1261      * Calculates the reciprocal of the square root of a tensor.
1262      *
1263      * Inputs:
1264      *
1265      * * <b>input</b>: <i>n</i>-dimensional tensor, where <i>n</i> is less than 8. Each element of the tensor cannot be less than 0.
1266      *
1267      * Outputs:
1268      *
1269      * * <b>output</b>: <i>n</i>-dimensional tensor, with the same shape and data type as <b>input</b>.
1270      */
1271     OH_NN_OPS_RSQRT = 44,
1272 
1273     /**
1274      * Reshapes a tensor.
1275      *
1276      * Inputs:
1277      *
1278      * * <b>input</b>: <i>n</i>-dimensional input tensor.
1279      * * <b>InputShape</b>: shape of the output tensor. The value is a 1D constant tensor.
1280      *
1281      * Outputs:
1282      *
1283      * * <b>output</b>: tensor whose data type is the same as that of <b>input</b> and shape is determined by <b>InputShape</b>.
1284      */
1285     OH_NN_OPS_RESHAPE = 45,
1286 
1287     /**
1288      * Calculates the PReLU activation value of <b>input</b> and <b>weight</b>.
1289      *
1290      * Inputs:
1291      *
1292      * * <b>input</b>: <i>n</i>-dimensional tensor. If <i>n</i> is greater than or equal to 2, <b>inputX</b> must be [BatchSize, ..., Channels].
1293      *       The second dimension is the number of channels.
1294      * * <b>weight</b>: 1D tensor. The length of <b>weight</b> must be 1 or equal to the number of channels. If the length of <b>weight</b> is 1,
1295      *       all channels share the same weight.
1296      *       If the length of <b>weight</b> is equal to the number of channels, each channel exclusively has a weight.
1297      *       If <i>n</i> is less than 2 for <b>inputX</b>, the <b>weight</b> length must be 1.
1298      *
1299      * Outputs:
1300      *
1301      * * <b>output</b>: PReLU activation value of <b>x</b>, with the same shape and data type as <b>inputX</b>.
1302      */
1303     OH_NN_OPS_PRELU = 46,
1304 
1305     /**
1306      * Calculates the Relu activation value of <b>input</b>.
1307      *
1308      * Inputs:
1309      *
1310      * * <b>input</b>: <i>n</i>-dimensional input tensor.
1311      *
1312      * Outputs:
1313      *
1314      * * <b>output</b>: <i>n</i>-dimensional tensor, with the same data type and shape as the input tensor.
1315      */
1316     OH_NN_OPS_RELU = 47,
1317 
1318     /**
1319      * Calculates the Relu6 activation value of the input, that is, calculate min(max(x, 0), 6) for each element x in the input.
1320      *
1321      * Inputs:
1322      *
1323      * * <b>input</b>: <i>n</i>-dimensional input tensor.
1324      *
1325      * Outputs:
1326      *
1327      * * <b>output</b>: <i>n</i>-dimensional Relu6 tensor, with the same data type and shape as the input tensor.
1328      */
1329     OH_NN_OPS_RELU6 = 48,
1330 
1331     /**
1332      * Applies layer normalization for a tensor from the specified axis.
1333      *
1334      * Inputs:
1335      *
1336      * * <b>input</b>: <i>n</i>-dimensional input tensor.
1337      * * <b>gamma</b>: <i>m</i>-dimensional tensor. The dimensions of <b>gamma</b> must be the same as
1338      *       the shape of the part of the input tensor to normalize.
1339      * * <b>beta</b>: <i>m</i>-dimensional tensor with the same shape as <b>gamma</b>.
1340      *
1341      * Parameters:
1342      *
1343      * * <b>beginAxis</b> is an NN_INT32 scalar that specifies the axis from which normalization starts. The value range is [1, rank(input)).
1344      * * <b>epsilon</b> is a scalar of NN_FLOAT32. It is a tiny amount in the normalization formula. The common value is 1e-7.
1345      *
1346      * Outputs:
1347      *
1348      * * <b>output</b>: <i>n</i>-dimensional tensor, with the same data type and shape as the input tensor.
1349      */
1350     OH_NN_OPS_LAYER_NORM = 49,
1351 
1352     /**
1353      * Calculates the product of the elements of a tensor along the specified dimension.
1354      *
1355      * Inputs:
1356      *
1357      * * <b>input</b>: <i>n</i>-dimensional input tensor, where <i>n</i> is less than 8.
1358      * * <b>axis</b>: dimension used to calculate the product. The value is a 1D tensor. The value range of each element in <b>axis</b> is [-n, n).
1359      *
1360      * Parameters:
1361      *
1362      * * <b>keepDims</b>: indicates whether to retain the dimension. The value is a Boolean value.
1363      *       When its value is <b>true</b>, the number of output dimensions is the same as that of the input.
1364      *       When its value is <b>false</b>, the number of output dimensions is reduced.
1365      *
1366      * Outputs:
1367      *
1368      * * <b>output</b>: <i>m</i>-dimensional output tensor whose data type is the same as that of the input.
1369      *       If <b>keepDims</b> is <b>true</b>, m==n. If <b>keepDims</b> is <b>false</b>, m<n.
1370      */
1371     OH_NN_OPS_REDUCE_PROD = 50,
1372 
1373     /**
1374      * Operates the logical OR in the specified dimension. If <b>keepDims</b> is set to <b>false</b>,
1375      * the number of dimensions is reduced for the input; if <b>keepDims</b> is set to <b>true</b>, the number of dimensions is retained.
1376      *
1377      * Inputs:
1378      *
1379      * * A <i>n</i>-dimensional input tensor, where <i>n</i> is less than 8.
1380      * * A 1D tensor specifying the dimension used to operate the logical OR. The value range of each element in <b>axis</b> is [-n, n).
1381      *
1382      * Parameters:
1383      *
1384      * * <b>keepDims</b>: indicates whether to retain the dimension. The value is a Boolean value.
1385      *
1386      * Outputs:
1387      * * <b>output</b>: <i>m</i>-dimensional output tensor whose data type is the same as that of the input.
1388      *       If <b>keepDims</b> is <b>true</b>, m==n. If <b>keepDims</b> is <b>false</b>, m<n.
1389      */
1390     OH_NN_OPS_REDUCE_ALL = 51,
1391 
1392     /**
1393      * Converts the data type.
1394      *
1395      * Inputs:
1396      *
1397      * * <b>input</b>: <i>n</i>-dimensional tensor.
1398      *
1399      * Parameters:
1400      *
1401      * * <b>src_t</b>: data type of the input.
1402      * * <b>dst_t</b>: data type of the output.
1403      *
1404      * Outputs:
1405      *
1406      * * <b>output</b>: <i>n</i>-dimensional tensor. The data type is determined by <b>dst_t</b>.
1407      *       The output shape is the same as the input shape.
1408      */
1409     OH_NN_OPS_QUANT_DTYPE_CAST = 52,
1410 
1411     /**
1412      * Obtains the values and indices of the largest <i>k</i> entries in the last dimension.
1413      *
1414      * Inputs:
1415      *
1416      * * <b>input</b>: <i>n</i>-dimensional tensor.
1417      * * <b>input</b> <i>k</i>: first <i>k</i> records of data and their indices.
1418      *
1419      * Parameters:
1420      *
1421      * * <b>sorted</b>: order of sorting. The value <b>true</b> means descending and <b>false</b> means ascending.
1422      *
1423      * Outputs:
1424      *
1425      * * <b>output0</b>: largest <i>k</i> elements in each slice of the last dimension.
1426      * * <b>output1</b>: index of the value in the last dimension of the input.
1427      */
1428     OH_NN_OPS_TOP_K = 53,
1429 
1430     /**
1431      * Returns the index of the maximum tensor value across axes.
1432      *
1433      * Inputs:
1434      *
1435      * * <b>input</b>: <i>n</i>-dimensional tensor (N, ∗), where ∗ means any number of additional dimensions.
1436      *
1437      * Parameters:
1438      *
1439      * * <b>axis</b>: dimension for calculating the index of the maximum.
1440      * * <b>keep_dims</b>: indicates whether to maintain the input tensor dimension. The value is a Boolean value.
1441      *
1442      * Outputs:
1443      * * <b>output</b>: index of the maximum input tensor on the axis. The value is a tensor.
1444      */
1445     OH_NN_OPS_ARG_MAX = 54,
1446 
1447     /**
1448      * Adds a dimension based on the value of <b>axis</b>.
1449      *
1450      * Inputs:
1451      * * <b>input</b>: <i>n</i>-dimensional tensor.
1452      *
1453      * Parameters:
1454      *
1455      * * <b>axis</b>: dimension to be added. The value of <b>axis</b> can be an integer or an array of integers.
1456      *       The value range of the integer is [-n, n).
1457      *
1458      * Outputs:
1459      * * <b>output</b>: output tensor.
1460      */
1461     OH_NN_OPS_UNSQUEEZE = 55,
1462 
1463     /**
1464      * Gaussian error linear unit activation function. The int quantization input is not supported. output=0.5∗input∗(1+erf(input/sqrt(2)))
1465      *
1466      * Inputs:
1467      * * An <i>n</i>-dimensional input tensor.
1468      *
1469      * Outputs:
1470      * * <b>output</b>: <i>n</i>-dimensional tensor, with the same data type and shape as the input tensor.
1471      */
1472     OH_NN_OPS_GELU = 56,
1473 } OH_NN_OperationType;
1474 
1475 /**
1476  * @brief Enumerates the tensor data types.
1477  *
1478  * Tensors are usually used to set the input, output, and operator parameters of a model. When a tensor is used
1479  * as the input or output of a model (or operator), set the tensor type to {@link OH_NN_TENSOR}.
1480  * When the tensor is used as an operator parameter, select an enumerated value other than {@link OH_NN_TENSOR} as the tensor type.
1481  * Assume that the <b>pad</b> parameter of the {@link OH_NN_OPS_CONV2D} operator is being set.
1482  * You need to set the <b>type</b> attribute of the {@link OH_NN_Tensor} instance to {@link OH_NN_CONV2D_PAD}.
1483  * The settings of other operator parameters are similar. The enumerated values are named
1484  * in the format OH_NN_{<i>Operator name</i>}_{<i>Attribute name</i>}.
1485  *
1486  * @since 9
1487  * @version 1.0
1488  */
1489 typedef enum {
1490     /** This enumerated value is used when the tensor is used as the input or output of a model (or operator). */
1491     OH_NN_TENSOR = 0,
1492 
1493     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter of the Add operator. */
1494     OH_NN_ADD_ACTIVATIONTYPE = 1,
1495 
1496     /** This enumerated value is used when the tensor is used as the <b>kernel_size</b> parameter of the AvgPool operator. */
1497     OH_NN_AVG_POOL_KERNEL_SIZE = 2,
1498     /** This enumerated value is used when the tensor is used as the <b>stride</b> parameter of the AvgPool operator. */
1499     OH_NN_AVG_POOL_STRIDE = 3,
1500     /** This enumerated value is used when the tensor is used as the <b>pad_mode</b> parameter of the AvgPool operator. */
1501     OH_NN_AVG_POOL_PAD_MODE = 4,
1502     /** This enumerated value is used when the tensor is used as the <b>pad</b> parameter of the AvgPool operator. */
1503     OH_NN_AVG_POOL_PAD = 5,
1504     /** This enumerated value is used when the tensor is used as the <b>activation_type</b> parameter of the AvgPool operator. */
1505     OH_NN_AVG_POOL_ACTIVATION_TYPE = 6,
1506 
1507     /** This enumerated value is used when the tensor is used as the <b>epsilon</b> parameter of the BatchNorm operator. */
1508     OH_NN_BATCH_NORM_EPSILON = 7,
1509 
1510     /** This enumerated value is used when the tensor is used as the <b>blockSize</b> parameter of the BatchToSpaceND operator. */
1511     OH_NN_BATCH_TO_SPACE_ND_BLOCKSIZE = 8,
1512     /** This enumerated value is used when the tensor is used as the <b>crops</b> parameter of the BatchToSpaceND operator. */
1513     OH_NN_BATCH_TO_SPACE_ND_CROPS = 9,
1514 
1515     /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter of the Concat operator. */
1516     OH_NN_CONCAT_AXIS = 10,
1517 
1518     /** This enumerated value is used when the tensor is used as the <b>strides</b> parameter of the Conv2D operator. */
1519     OH_NN_CONV2D_STRIDES = 11,
1520     /** This enumerated value is used when the tensor is used as the <b>pad</b> parameter of the Conv2D operator. */
1521     OH_NN_CONV2D_PAD = 12,
1522     /** This enumerated value is used when the tensor is used as the <b>dilation</b> parameter of the Conv2D operator. */
1523     OH_NN_CONV2D_DILATION = 13,
1524     /** This enumerated value is used when the tensor is used as the <b>padMode</b> parameter of the Conv2D operator. */
1525     OH_NN_CONV2D_PAD_MODE = 14,
1526     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter of the Conv2D operator. */
1527     OH_NN_CONV2D_ACTIVATION_TYPE = 15,
1528     /** This enumerated value is used when the tensor is used as the <b>group</b> parameter of the Conv2D operator. */
1529     OH_NN_CONV2D_GROUP = 16,
1530 
1531     /** This enumerated value is used when the tensor is used as the <b>strides</b> parameter of the Conv2DTranspose operator. */
1532     OH_NN_CONV2D_TRANSPOSE_STRIDES = 17,
1533     /** This enumerated value is used when the tensor is used as the <b>pad</b> parameter of the Conv2DTranspose operator. */
1534     OH_NN_CONV2D_TRANSPOSE_PAD = 18,
1535     /** This enumerated value is used when the tensor is used as the <b>dilation</b> parameter of the Conv2DTranspose operator. */
1536     OH_NN_CONV2D_TRANSPOSE_DILATION = 19,
1537     /** This enumerated value is used when the tensor is used as the <b>outputPaddings</b> parameter of the Conv2DTranspose operator. */
1538     OH_NN_CONV2D_TRANSPOSE_OUTPUT_PADDINGS = 20,
1539     /** This enumerated value is used when the tensor is used as the <b>padMode</b> parameter of the Conv2DTranspose operator. */
1540     OH_NN_CONV2D_TRANSPOSE_PAD_MODE = 21,
1541     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter of the Conv2DTranspose operator. */
1542     OH_NN_CONV2D_TRANSPOSE_ACTIVATION_TYPE = 22,
1543     /** This enumerated value is used when the tensor is used as the <b>group</b> parameter of the Conv2DTranspose operator. */
1544     OH_NN_CONV2D_TRANSPOSE_GROUP = 23,
1545 
1546     /** This enumerated value is used when the tensor is used as the <b>strides</b> parameter of the DepthwiseConv2dNative operator. */
1547     OH_NN_DEPTHWISE_CONV2D_NATIVE_STRIDES = 24,
1548     /** This enumerated value is used when the tensor is used as the <b>pad</b> parameter of the DepthwiseConv2dNative operator. */
1549     OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD = 25,
1550     /** This enumerated value is used when the tensor is used as the <b>dilation</b> parameter of the DepthwiseConv2dNative operator. */
1551     OH_NN_DEPTHWISE_CONV2D_NATIVE_DILATION = 26,
1552     /** This enumerated value is used when the tensor is used as the <b>padMode</b> parameter of the DepthwiseConv2dNative operator. */
1553     OH_NN_DEPTHWISE_CONV2D_NATIVE_PAD_MODE = 27,
1554     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter of the DepthwiseConv2dNative operator. */
1555     OH_NN_DEPTHWISE_CONV2D_NATIVE_ACTIVATION_TYPE = 28,
1556 
1557     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter of the Div operator. */
1558     OH_NN_DIV_ACTIVATIONTYPE = 29,
1559 
1560     /** This enumerated value is used when the tensor is used as the <b>mode</b> parameter of the Eltwise operator. */
1561     OH_NN_ELTWISE_MODE = 30,
1562 
1563     /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter of the FullConnection operator. */
1564     OH_NN_FULL_CONNECTION_AXIS = 31,
1565     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter of the FullConnection operator. */
1566     OH_NN_FULL_CONNECTION_ACTIVATIONTYPE = 32,
1567 
1568     /** This enumerated value is used when the tensor is used as the <b>transposeA</b> parameter of the Matmul operator. */
1569     OH_NN_MATMUL_TRANSPOSE_A = 33,
1570     /** This enumerated value is used when the tensor is used as the <b>transposeB</b> parameter of the Matmul operator. */
1571     OH_NN_MATMUL_TRANSPOSE_B = 34,
1572     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter of the Matmul operator. */
1573     OH_NN_MATMUL_ACTIVATION_TYPE = 35,
1574 
1575     /** This enumerated value is used when the tensor is used as the <b>kernel_size</b> parameter of the MaxPool operator. */
1576     OH_NN_MAX_POOL_KERNEL_SIZE = 36,
1577     /** This enumerated value is used when the tensor is used as the <b>stride</b> parameter of the MaxPool operator. */
1578     OH_NN_MAX_POOL_STRIDE = 37,
1579     /** This enumerated value is used when the tensor is used as the <b>pad_mode</b> parameter of the MaxPool operator. */
1580     OH_NN_MAX_POOL_PAD_MODE = 38,
1581     /** This enumerated value is used when the tensor is used as the <b>pad</b> parameter of the MaxPool operator. */
1582     OH_NN_MAX_POOL_PAD = 39,
1583     /** This enumerated value is used when the tensor is used as the <b>activation_type</b> parameter of the MaxPool operator. */
1584     OH_NN_MAX_POOL_ACTIVATION_TYPE = 40,
1585 
1586     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter of the Mul operator. */
1587     OH_NN_MUL_ACTIVATION_TYPE = 41,
1588 
1589     /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter of the OneHot operator. */
1590     OH_NN_ONE_HOT_AXIS = 42,
1591 
1592     /** This enumerated value is used when the tensor is used as the <b>constant_value</b> parameter of the Pad operator. */
1593     OH_NN_PAD_CONSTANT_VALUE = 43,
1594 
1595     /** This enumerated value is used when the tensor is used as the <b>activationType</b> parameter of the Scale operator. */
1596     OH_NN_SCALE_ACTIVATIONTYPE = 44,
1597     /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter of the Scale operator. */
1598     OH_NN_SCALE_AXIS = 45,
1599 
1600     /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter of the Softmax operator. */
1601     OH_NN_SOFTMAX_AXIS = 46,
1602 
1603     /** This enumerated value is used when the tensor is used as the <b>BlockShape</b> parameter of the SpaceToBatchND operator. */
1604     OH_NN_SPACE_TO_BATCH_ND_BLOCK_SHAPE = 47,
1605     /** This enumerated value is used when the tensor is used as the <b>Paddings</b> parameter of the SpaceToBatchND operator. */
1606     OH_NN_SPACE_TO_BATCH_ND_PADDINGS = 48,
1607 
1608     /** This enumerated value is used when the tensor is used as the <b>Axis</b> parameter of the Split operator. */
1609     OH_NN_SPLIT_AXIS = 49,
1610     /** This enumerated value is used when the tensor is used as the <b>OutputNum</b> parameter of the Split operator. */
1611     OH_NN_SPLIT_OUTPUT_NUM = 50,
1612     /** This enumerated value is used when the tensor is used as the <b>SizeSplits</b> parameter of the Split operator. */
1613     OH_NN_SPLIT_SIZE_SPLITS = 51,
1614 
1615     /** This enumerated value is used when the tensor is used as the <b>Axis</b> parameter of the Squeeze operator. */
1616     OH_NN_SQUEEZE_AXIS = 52,
1617 
1618     /** This enumerated value is used when the tensor is used as the <b>Axis</b> parameter of the Stack operator. */
1619     OH_NN_STACK_AXIS = 53,
1620 
1621     /** This enumerated value is used when the tensor is used as the <b>BeginMask</b> parameter of the StridedSlice operator. */
1622     OH_NN_STRIDED_SLICE_BEGIN_MASK = 54,
1623     /** This enumerated value is used when the tensor is used as the <b>EndMask</b> parameter of the StridedSlice operator. */
1624     OH_NN_STRIDED_SLICE_END_MASK = 55,
1625     /** This enumerated value is used when the tensor is used as the <b>EllipsisMask</b> parameter of the StridedSlice operator. */
1626     OH_NN_STRIDED_SLICE_ELLIPSIS_MASK = 56,
1627     /** This enumerated value is used when the tensor is used as the <b>NewAxisMask</b> parameter of the StridedSlice operator. */
1628     OH_NN_STRIDED_SLICE_NEW_AXIS_MASK = 57,
1629     /** This enumerated value is used when the tensor is used as the <b>ShrinkAxisMask</b> parameter of the StridedSlice operator. */
1630     OH_NN_STRIDED_SLICE_SHRINK_AXIS_MASK = 58,
1631 
1632     /** This enumerated value is used when the tensor is used as the <b>ActivationType</b> parameter of the Sub operator. */
1633     OH_NN_SUB_ACTIVATIONTYPE = 59,
1634 
1635     /** This enumerated value is used when the tensor is used as the <b>keep_dims</b> parameter of the ReduceMean operator. */
1636     OH_NN_REDUCE_MEAN_KEEP_DIMS = 60,
1637 
1638     /** This enumerated value is used when the tensor is used as the <b>new_height</b> parameter of the ResizeBilinear operator. */
1639     OH_NN_RESIZE_BILINEAR_NEW_HEIGHT = 61,
1640     /** This enumerated value is used when the tensor is used as the <b>new_width</b> parameter of the ResizeBilinear operator. */
1641     OH_NN_RESIZE_BILINEAR_NEW_WIDTH = 62,
1642     /** This enumerated value is used when the tensor is used as the <b>preserve_aspect_ratio</b> parameter of the ResizeBilinear operator. */
1643     OH_NN_RESIZE_BILINEAR_PRESERVE_ASPECT_RATIO = 63,
1644     /** This enumerated value is used when the tensor is used as the <b>coordinate_transform_mode</b> parameter of the ResizeBilinear operator. */
1645     OH_NN_RESIZE_BILINEAR_COORDINATE_TRANSFORM_MODE = 64,
1646     /** This enumerated value is used when the tensor is used as the <b>exclude_outside</b> parameter of the ResizeBilinear operator. */
1647     OH_NN_RESIZE_BILINEAR_EXCLUDE_OUTSIDE = 65,
1648 
1649     /** This enumerated value is used when the tensor is used as the <b>beginNormAxis</b> parameter of the LayerNorm operator. */
1650     OH_NN_LAYER_NORM_BEGIN_NORM_AXIS = 66,
1651     /** This enumerated value is used when the tensor is used as the <b>epsilon</b> parameter of the LayerNorm operator. */
1652     OH_NN_LAYER_NORM_EPSILON = 67,
1653     /** This enumerated value is used when the tensor is used as the <b>beginParamsAxis</b> parameter of the LayerNorm operator. */
1654     OH_NN_LAYER_NORM_BEGIN_PARAM_AXIS = 68,
1655     /** This enumerated value is used when the tensor is used as the <b>elementwiseAffine</b> parameter of the LayerNorm operator. */
1656     OH_NN_LAYER_NORM_ELEMENTWISE_AFFINE = 69,
1657 
1658     /** This enumerated value is used when the tensor is used as the <b>keep_dims</b> parameter of the ReduceProd operator. */
1659     OH_NN_REDUCE_PROD_KEEP_DIMS = 70,
1660 
1661     /** This enumerated value is used when the tensor is used as the <b>keep_dims</b> parameter of the ReduceAll operator. */
1662     OH_NN_REDUCE_ALL_KEEP_DIMS = 71,
1663 
1664     /** This enumerated value is used when the tensor is used as the <b>src_t</b> parameter of the QuantDTypeCast operator. */
1665     OH_NN_QUANT_DTYPE_CAST_SRC_T = 72,
1666     /** This enumerated value is used when the tensor is used as the <b>dst_t</b> parameter of the QuantDTypeCast operator. */
1667     OH_NN_QUANT_DTYPE_CAST_DST_T = 73,
1668 
1669     /** This enumerated value is used when the tensor is used as the <b>Sorted</b> parameter of the TopK operator. */
1670     OH_NN_TOP_K_SORTED = 74,
1671 
1672     /** This enumerated value is used when the tensor is used as the <b>axis</b> parameter of the ArgMax operator. */
1673     OH_NN_ARG_MAX_AXIS = 75,
1674     /** This enumerated value is used when the tensor is used as the <b>keepDims</b> parameter of the ArgMax operator. */
1675     OH_NN_ARG_MAX_KEEPDIMS = 76,
1676 
1677     /** This enumerated value is used when the tensor is used as the <b>Axis</b> parameter of the Unsqueeze operator. */
1678     OH_NN_UNSQUEEZE_AXIS = 77,
1679 } OH_NN_TensorType;
1680 
1681 /**
1682  * @brief This structure is used to store a 32-bit unsigned integer array.
1683  *
1684  * @since 9
1685  * @version 1.0
1686  */
1687 typedef struct OH_NN_UInt32Array {
1688     /** Pointer to the 32-bit unsigned integer array */
1689     uint32_t *data;
1690     /** Number of elements in the array pointed to by <b>data</b> */
1691     uint32_t size;
1692 } OH_NN_UInt32Array;
1693 
1694 /**
1695  * @brief Quantization information.
1696  *
1697  * In quantization scenarios, the 32-bit floating-point data type is quantized into the fixed-point data type according to the following formula:
1698  \f[
1699     q = clamp(round(\frac{r}{s}+z), q_{min}, q_{max})
1700  \f]
1701  * s and z are quantization parameters, which are stored by <b>scale</b> and <b>zeroPoint</b> in {@link OH_NN_QuantParam}.
1702  * r is a floating point number, q is the quantization result, q_min is the lower bound of the quantization result, and
1703  * q_max is an upper bound of a quantization result. The calculation method is as follows:
1704  *
1705  \f[
1707   \begin{cases}
1708        q_{min} = -(1 << (numBits - 1)) \\
1709        q_{max} = (1 << (numBits - 1)) - 1 \\
1710    \end{cases}
1711  \f]
1712  * The clamp function is defined as follows:
1713  \f[
1714   \text{clamp}(x,min,max) =
1715   \begin{cases}
1716        \text{max} & \text{ if } x > \text{ max } \\
1717        \text{min} & \text{ if } x < \text{ min } \\
1718        x & \text{ otherwise } \\
1719    \end{cases}
1720  \f]
1721  *
1722  * @since 9
1723  * @version 1.0
1724  */
1725 typedef struct OH_NN_QuantParam {
1726     /** Specifies the length of the numBits, scale, and zeroPoint arrays. In the per-layer quantization scenario,
1727      *  <b>quantCount</b> is usually set to <b>1</b>. That is, all channels of a tensor share a set of quantization parameters.
1728      *  In the per-channel quantization scenario, <b>quantCount</b> is usually the same as the number of tensor channels,
1729      *  and each channel uses its own quantization parameters.
1730      */
1731     uint32_t quantCount;
1732     /** Pointer to the array of quantization bit numbers; the array holds <b>quantCount</b> elements. */
1733     const uint32_t *numBits;
1734     /** Pointer to the scale data in the quantization formula; the array holds <b>quantCount</b> elements. */
1735     const double *scale;
1736     /** Pointer to the zero point data in the quantization formula; the array holds <b>quantCount</b> elements. */
1737     const int32_t *zeroPoint;
1738 } OH_NN_QuantParam;
1739 
1740 /**
1741  * @brief Defines the tensor structure.
1742  *
1743  * It is usually used to construct data nodes and operator parameters in a model graph. When constructing a tensor,
1744  * you need to specify the data type, number of dimensions, dimension information, and quantization information.
1745  *
1746  * @since 9
1747  * @version 1.0
1748  */
1749 typedef struct OH_NN_Tensor {
1750     /** Data type of the specified tensor. The value must be an enumerated value of {@link OH_NN_DataType}. */
1751     OH_NN_DataType dataType;
1752     /** Number of dimensions of the specified tensor */
1753     uint32_t dimensionCount;
1754     /** Dimension information (shape) of the specified tensor; the array holds <b>dimensionCount</b> elements. */
1755     const int32_t *dimensions;
1756     /** Quantization information of the specified tensor. The data type must be {@link OH_NN_QuantParam}. */
1757     const OH_NN_QuantParam *quantParam;
1758     /** Specifies the tensor type. The value of <b>type</b> is related to the tensor usage.
1759      *  When the tensor is used as the input or output of the model, set <b>type</b> to {@link OH_NN_TENSOR}.
1760      *  When a tensor is used as an operator parameter, select any enumerated value except {@link OH_NN_TENSOR} from {@link OH_NN_TensorType}.
1761      */
1762     OH_NN_TensorType type;
1763 } OH_NN_Tensor;
1764 
1765 /**
1766  * @brief Defines the memory structure.
1767  *
1768  * @since 9
1769  * @version 1.0
1770  */
1771 typedef struct OH_NN_Memory {
1772     /** Pointer to the shared memory. The shared memory is usually allocated by the underlying hardware driver. Declared <b>void * const</b> so the pointer itself cannot be retargeted after the structure is created. */
1773     void * const data;
1774     /** Records the length of the shared memory, in bytes. Declared <b>const</b> so the recorded length cannot be modified after creation. */
1775     const size_t length;
1776 } OH_NN_Memory;
1777 
1778 #ifdef __cplusplus
1779 }
1780 #endif // __cplusplus
1781 
1782 /** @} */
1783 #endif // NEURAL_NETWORK_RUNTIME_TYPE_H
1784