• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2022-2023 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 /**
17  * @addtogroup NeuralNetworkRuntime
18  * @{
19  *
20  * @brief Provides APIs of Neural Network Runtime for accelerating the model inference.
21  *
22  * @since 9
23  * @version 2.0
24  */
25 
26 /**
27  * @file neural_network_core.h
28  *
29  * @brief Defines the Neural Network Core APIs. The AI inference framework uses the Native APIs provided by
30  *        Neural Network Core to compile models and perform inference and computing on acceleration hardware.
31  *
32  * Note: Currently, the APIs of Neural Network Core do not support multi-thread calling. \n
33  *
34  * include "neural_network_runtime/neural_network_core.h"
35  * @library libneural_network_core.so
36  * @kit NeuralNetworkRuntimeKit
37  * @syscap SystemCapability.AI.NeuralNetworkRuntime
38  * @since 11
39  * @version 1.0
40  */
41 
42 #ifndef NEURAL_NETWORK_CORE_H
43 #define NEURAL_NETWORK_CORE_H
44 
45 #include "neural_network_runtime_type.h"
46 
47 #ifdef __cplusplus
48 extern "C" {
49 #endif
50 
51 /**
52  * @brief Creates a compilation instance of the {@link OH_NNCompilation} type.
53  *
54  * After the OH_NNModel module completes model construction, APIs provided by the OH_NNCompilation module pass the
55  * model to underlying device for compilation. This method creates a {@link OH_NNCompilation} instance
56  * based on the passed {@link OH_NNModel} instance. The {@link OH_NNCompilation_SetDevice} method is called
57  * to set the device to compile on, and {@link OH_NNCompilation_Build} is then called to complete compilation.\n
58  *
59  * In addition to computing device selection, the OH_NNCompilation module supports features such as model caching,
60  * performance preference, priority setting, and float16 computing, which can be implemented by the following methods:\n
61  * {@link OH_NNCompilation_SetCache}\n
62  * {@link OH_NNCompilation_SetPerformanceMode}\n
63  * {@link OH_NNCompilation_SetPriority}\n
64  * {@link OH_NNCompilation_EnableFloat16}\n
65  *
66  * After {@link OH_NNCompilation_Build} is called, the {@link OH_NNModel} instance can be released.\n
67  *
68  * @param model Pointer to the {@link OH_NNModel} instance.
69  * @return Pointer to a {@link OH_NNCompilation} instance, or NULL if it fails to create. The possible reason for
70  *         failure is that the parameters of model are invalid or there is a problem with the model format.
71  * @since 9
72  * @version 1.0
73  */
74 OH_NNCompilation *OH_NNCompilation_Construct(const OH_NNModel *model);
75 
76 /**
77  * @brief Creates a compilation instance based on an offline model file.
78  *
79  * This method conflicts with the way of passing an online built model or an offline model file buffer,
80  * and you have to choose only one of the three construction methods. \n
81  *
82  * Offline model is a type of model that is offline compiled by the model converter provided by a device vendor.
 83  * Therefore, the offline model can only be used on the specified device, but its compilation time is usually
84  * much less than {@link OH_NNModel}. \n
85  *
86  * You should perform the offline compilation during your development and deploy the offline model in your app package. \n
87  *
88  * @param modelPath Offline model file path.
89  * @return Pointer to an {@link OH_NNCompilation} instance, or NULL if it fails to create. The possible reason for
90  *         failure is that the modelPath is invalid.
91  * @since 11
92  * @version 1.0
93  */
94 OH_NNCompilation *OH_NNCompilation_ConstructWithOfflineModelFile(const char *modelPath);
95 
96 /**
97  * @brief Creates a compilation instance based on an offline model file buffer.
98  *
99  * This method conflicts with the way of passing an online built model or an offline model file path,
100  * and you have to choose only one of the three construction methods. \n
101  *
102  * Note that the returned {@link OH_NNCompilation} instance only saves the <b>modelBuffer</b> pointer inside, instead of
 103  * copying its data. You should not release <b>modelBuffer</b> before the {@link OH_NNCompilation} instance is destroyed. \n
104  *
105  * @param modelBuffer Offline model file buffer.
 106  * @param modelSize Offline model buffer size.
107  * @return Pointer to an {@link OH_NNCompilation} instance, or NULL if it fails to create. The possible reason for
108  *         failure is that the modelBuffer or modelSize is invalid.
109  * @since 11
110  * @version 1.0
111  */
112 OH_NNCompilation *OH_NNCompilation_ConstructWithOfflineModelBuffer(const void *modelBuffer, size_t modelSize);
113 
114 /**
 115  * @brief Creates an empty compilation instance for restoration from cache later.
116  *
117  * See {@link OH_NNCompilation_SetCache} for the description of cache.\n
118  *
119  * The restoration time from the cache is less than compilation with {@link OH_NNModel}.\n
120  *
121  * You should call {@link OH_NNCompilation_SetCache} or {@link OH_NNCompilation_ImportCacheFromBuffer} first,
122  * and then call {@link OH_NNCompilation_Build} to complete the restoration.\n
123  *
124  * @return Pointer to an {@link OH_NNCompilation} instance, or NULL if it fails to create. The possible reason for
125  *         failure is that the cache file saved before is invalid.
126  * @since 11
127  * @version 1.0
128  */
129 OH_NNCompilation *OH_NNCompilation_ConstructForCache();
130 
131 /**
132  * @brief Exports the cache to a given buffer.
133  *
134  * See {@link OH_NNCompilation_SetCache} for the description of cache.\n
135  *
136  * Note that the cache is the result of compilation building {@link OH_NNCompilation_Build},
137  * so that this method must be called after {@link OH_NNCompilation_Build}.\n
138  *
139  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
140  * @param buffer Pointer to the given buffer.
141  * @param length Buffer length.
142  * @param modelSize Byte size of the model cache.
143  * @return Execution result of the function.
144  *         {@link OH_NN_SUCCESS} export cache to buffer successfully.\n
145  *         {@link OH_NN_INVALID_PARAMETER} fail to export cache to buffer. The possible reason for failure
146  *         is that the <b>compilation</b>, <b>buffer</b> or <b>modelSize</b> is nullptr, or <b>length</b> is 0,
147  *         or <b>compilation</b> is invalid.\n
148  *         {@link OH_NN_UNSUPPORTED} exporting cache to buffer is unsupported.\n
149  * @since 11
150  * @version 1.0
151  */
152 OH_NN_ReturnCode OH_NNCompilation_ExportCacheToBuffer(OH_NNCompilation *compilation,
153                                                       const void *buffer,
154                                                       size_t length,
155                                                       size_t *modelSize);
156 
157 /**
158  * @brief Imports the cache from a given buffer.
159  *
160  * See {@link OH_NNCompilation_SetCache} for the description of cache.\n
161  *
162  * {@link OH_NNCompilation_Build} should be called to complete the restoration after
163  * {@link OH_NNCompilation_ImportCacheFromBuffer} is called.\n
164  *
165  * Note that <b>compilation</b> only saves the <b>buffer</b> pointer inside, instead of copying its data. You should not
 166  * release <b>buffer</b> before <b>compilation</b> is destroyed.\n
167  *
168  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
169  * @param buffer Pointer to the given buffer.
170  * @param modelSize Byte size of the model cache.
171  * @return Execution result of the function.
172  *         {@link OH_NN_SUCCESS} import cache from buffer successfully.\n
173  *         {@link OH_NN_INVALID_PARAMETER} fail to import cache from buffer. The possible reason for failure is that
174  *         the <b>compilation</b> or <b>buffer</b> is nullptr, or <b>modelSize</b> is 0, or content of <b>buffer</b>
175  *         is invalid.\n
176  * @since 11
177  * @version 1.0
178  */
179 OH_NN_ReturnCode OH_NNCompilation_ImportCacheFromBuffer(OH_NNCompilation *compilation,
180                                                         const void *buffer,
181                                                         size_t modelSize);
182 
183 /**
184  * @brief Adds an extension config for a custom hardware attribute.
185  *
186  * Some devices have their own specific attributes which have not been opened in NNRt. This method provides an additional way for you
187  * to set these custom hardware attributes of the device. You should query their names and values from the device
188  * vendor's documents, and add them into compilation instance one by one. These attributes will be passed directly to device
189  * driver, and this method will return error code if the driver cannot parse them. \n
190  *
191  * After {@link OH_NNCompilation_Build} is called, the <b>configName</b> and <b>configValue</b> can be released. \n
192  *
193  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
194  * @param configName Config name.
195  * @param configValue A byte buffer saving the config value.
196  * @param configValueSize Byte size of the config value.
197  * @return Execution result of the function.
198  *         {@link OH_NN_SUCCESS} add extension config successfully.\n
199  *         {@link OH_NN_INVALID_PARAMETER} fail to add extension config. The possible reason for failure is that the
200  *         <b>compilation</b>, <b>configName</b> or <b>configValue</b> is nullptr, or <b>configValueSize</b> is 0.\n
201  *         {@link OH_NN_FAILED} other failures, such as memory error during object creation.\n
202  * @since 11
203  * @version 1.0
204  */
205 OH_NN_ReturnCode OH_NNCompilation_AddExtensionConfig(OH_NNCompilation *compilation,
206                                                      const char *configName,
207                                                      const void *configValue,
208                                                      const size_t configValueSize);
209 
210 /**
211  * @brief Specifies the device for model compilation and computing.
212  *
213  * In the compilation phase, you need to specify the device for model compilation and computing. Call {@link OH_NNDevice_GetAllDevicesID}
214  * to obtain available device IDs. Call {@link OH_NNDevice_GetType} and {@link OH_NNDevice_GetName} to obtain device information
215  * and pass target device ID to this method for setting. \n
216  *
217  *
218  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
219  * @param deviceID Device id. If it is 0, the first device in the current device list will be used by default.
220  * @return Execution result of the function.
221  *         {@link OH_NN_SUCCESS} set device successfully.\n
222  *         {@link OH_NN_INVALID_PARAMETER} fail to set device. The possible reason for failure
223  *         is that the <b>compilation</b> is nullptr.\n
224  * @since 9
225  * @version 1.0
226  */
227 OH_NN_ReturnCode OH_NNCompilation_SetDevice(OH_NNCompilation *compilation, size_t deviceID);
228 
229 /**
230  * @brief Set the cache directory and version of the compiled model.
231  *
232  * On the device that supports caching, a model can be saved as a cache file after being compiled on the device driver.
233  * The model can be directly read from the cache file in the next compilation, saving recompilation time.
234  * This method performs different operations based on the passed cache directory and version: \n
235  *
236  * - No file exists in the cache directory:
237  * Caches the compiled model to the directory and sets the cache version to <b>version</b>. \n
238  *
239  * - A complete cache file exists in the cache directory, and its version is <b>version</b>:
240  * Reads the cache file in the path and passes the data to the underlying device for conversion into executable model instances. \n
241  *
242  * - A complete cache file exists in the cache directory, and its version is earlier than <b>version</b>:
243  * When model compilation is complete on the underlying device, overwrites the cache file and changes the version number to <b>version</b>. \n
244  *
245  * - A complete cache file exists in the cache directory, and its version is later than <b>version</b>:
246  * Returns the {@link OH_NN_INVALID_PARAMETER} error code without reading the cache file. \n
247  *
248  * - The cache file in the cache directory is incomplete or you do not have the permission to access the cache file.
249  * Returns the {@link OH_NN_INVALID_FILE} error code. \n
250  *
251  * - The cache directory does not exist or you do not have the access permission.
252  * Returns the {@link OH_NN_INVALID_PATH} error code. \n
253  *
254  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
255  * @param cachePath Directory for storing model cache files. This method creates directories for different devices in the <b>cachePath</b> directory.
256  *                  You are advised to use a separate cache directory for each model.
257  * @param version Cache version.
258  * @return Execution result of the function.
259  *         {@link OH_NN_SUCCESS} set cache path and version successfully.\n
260  *         {@link OH_NN_INVALID_PARAMETER} fail to set cache path and version. The possible reason for failure
261  *         is that the <b>compilation</b> or <b>cachePath</b> is nullptr.\n
262  * @since 9
263  * @version 1.0
264  */
265 OH_NN_ReturnCode OH_NNCompilation_SetCache(OH_NNCompilation *compilation, const char *cachePath, uint32_t version);
266 
267 /**
268  * @brief Sets the performance mode for model computing.
269  *
270  * Allows you to set the performance mode for model computing to meet the requirements of low power consumption
271  * and ultimate performance. If this method is not called to set the performance mode in the compilation phase, the compilation instance assigns
272  * the {@link OH_NN_PERFORMANCE_NONE} mode for the model by default. In this case, the device performs computing in the default performance mode. \n
273  *
274  * If this method is called on the device that does not support the setting of the performance mode, the {@link OH_NN_UNAVALIDABLE_DEVICE} error code is returned. \n
275  *
276  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
277  * @param performanceMode Performance mode. For details about the available performance modes, see {@link OH_NN_PerformanceMode}.
278  * @return Execution result of the function.
279  *         {@link OH_NN_SUCCESS} set performance mode successfully.\n
280  *         {@link OH_NN_INVALID_PARAMETER} fail to set performance mode. The possible reason for failure
281  *         is that the <b>compilation</b> is nullptr, or <b>performanceMode</b> is invalid.\n
282  *         {@link OH_NN_FAILED} fail to query whether the backend device supports setting performance mode.\n
 283  *         {@link OH_NN_OPERATION_FORBIDDEN} the backend device does not support setting the performance mode.\n
284  * @since 9
285  * @version 1.0
286  */
287 OH_NN_ReturnCode OH_NNCompilation_SetPerformanceMode(OH_NNCompilation *compilation,
288                                                      OH_NN_PerformanceMode performanceMode);
289 
290 /**
291  * @brief Sets the model computing priority.
292  *
293  * Allows you to set computing priorities for models.
294  * The priorities apply only to models created by the process with the same UID.
295  * The settings will not affect models created by processes with different UIDs on different devices. \n
296  *
297  * If this method is called on the device that does not support the priority setting, the {@link OH_NN_UNAVALIDABLE_DEVICE} error code is returned. \n
298  *
299  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
300  * @param priority Priority. For details about the optional priorities, see {@link OH_NN_Priority}.
301  * @return Execution result of the function.
302  *         {@link OH_NN_SUCCESS} set priority successfully.\n
303  *         {@link OH_NN_INVALID_PARAMETER} fail to set priority. The possible reason for failure
304  *         is that the <b>compilation</b> is nullptr, or <b>priority</b> is invalid.\n
305  *         {@link OH_NN_FAILED} fail to query whether the backend device supports setting priority.\n
 306  *         {@link OH_NN_OPERATION_FORBIDDEN} the backend device does not support setting the priority.\n
307  * @since 9
308  * @version 1.0
309  */
310 OH_NN_ReturnCode OH_NNCompilation_SetPriority(OH_NNCompilation *compilation, OH_NN_Priority priority);
311 
312 /**
313  * @brief Enables float16 for computing.
314  *
315  * Float32 is used by default for the model of float type. If this method is called on a device that supports float16,
316  * float16 will be used for computing the float32 model to reduce memory usage and execution time. \n
317  *
318  * This option is useless for the model of int type, e.g. int8 type. \n
319  *
320  * If this method is called on the device that does not support float16, the {@link OH_NN_UNAVALIDABLE_DEVICE} error code is returned. \n
321  *
322  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
323  * @param enableFloat16 Indicates whether to enable float16. If this parameter is set to <b>true</b>, float16 inference is performed.
324  *                      If this parameter is set to <b>false</b>, float32 inference is performed.
325  * @return Execution result of the function.
326  *         {@link OH_NN_SUCCESS} enable fp16 successfully.\n
327  *         {@link OH_NN_INVALID_PARAMETER} fail to enable fp16. The possible reason for failure
328  *         is that the <b>compilation</b> is nullptr.\n
329  * @since 9
330  * @version 1.0
331  */
332 OH_NN_ReturnCode OH_NNCompilation_EnableFloat16(OH_NNCompilation *compilation, bool enableFloat16);
333 
334 /**
335  * @brief Compiles a model.
336  *
337  * After the compilation configuration is complete, call this method to return the compilation result. The compilation instance pushes the model and
338  * compilation options to the device for compilation. After this method is called, additional compilation operations cannot be performed. \n
339  *
340  * If the {@link OH_NNCompilation_SetDevice}, {@link OH_NNCompilation_SetCache}, {@link OH_NNCompilation_SetPerformanceMode},
341  * {@link OH_NNCompilation_SetPriority}, and {@link OH_NNCompilation_EnableFloat16} methods are called, {@link OH_NN_OPERATION_FORBIDDEN} is returned. \n
342  *
343  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
344  * @return Execution result of the function.
345  *         {@link OH_NN_SUCCESS} build model successfully.\n
346  *         {@link OH_NN_INVALID_PARAMETER} fail to build model. The possible reason for failure
347  *         is that the <b>compilation</b> is nullptr, or the parameters set before is invalid.\n
348  *         {@link OH_NN_FAILED} fail to build model.\n
 349  *         {@link OH_NN_OPERATION_FORBIDDEN} the backend device does not support the model.\n
350  * @since 9
351  * @version 1.0
352  */
353 OH_NN_ReturnCode OH_NNCompilation_Build(OH_NNCompilation *compilation);
354 
355 /**
356  * @brief Releases the <b>Compilation</b> object.
357  *
358  * This method needs to be called to release the compilation instance created by {@link OH_NNCompilation_Construct},
359  * {@link OH_NNCompilation_ConstructWithOfflineModelFile}, {@link OH_NNCompilation_ConstructWithOfflineModelBuffer} and
 360  * {@link OH_NNCompilation_ConstructForCache}. Otherwise, a memory leak will occur. \n
361  *
362  * If <b>compilation</b> or <b>*compilation</b> is a null pointer, this method only prints warning logs and does not execute the release. \n
363  *
364  * @param compilation Double pointer to the {@link OH_NNCompilation} instance. After a compilation instance is destroyed,
365  *                    this method sets <b>*compilation</b> to a null pointer.
366  * @since 9
367  * @version 1.0
368  */
369 void OH_NNCompilation_Destroy(OH_NNCompilation **compilation);
370 
371 
372 /**
373  * @brief Creates an {@link NN_TensorDesc} instance.
374  *
375  * The {@link NN_TensorDesc} describes various tensor attributes, such as name/data type/shape/format, etc.\n
376  *
377  * The following methods can be called to create a {@link NN_Tensor} instance based on the passed {@link NN_TensorDesc}
378  * instance:\n
379  * {@link OH_NNTensor_Create}\n
380  * {@link OH_NNTensor_CreateWithSize}\n
381  * {@link OH_NNTensor_CreateWithFd}\n
382  *
383  * Note that these methods will copy the {@link NN_TensorDesc} instance into {@link NN_Tensor}. Therefore you can create
384  * multiple {@link NN_Tensor} instances with the same {@link NN_TensorDesc} instance. And you should destroy the
385  * {@link NN_TensorDesc} instance by {@link OH_NNTensorDesc_Destroy} when it is no longer used.\n
386  *
387  * @return Pointer to a {@link NN_TensorDesc} instance, or NULL if it fails to create. The possible reason for failure
388  *         is that the memory error occurred during object creation.
389  * @since 11
390  * @version 1.0
391  */
392 NN_TensorDesc *OH_NNTensorDesc_Create();
393 
394 /**
395  * @brief Releases an {@link NN_TensorDesc} instance.
396  *
397  * When the {@link NN_TensorDesc} instance is no longer used, this method needs to be called to release it. Otherwise,
 398  * a memory leak will occur. \n
399  *
400  * If <b>tensorDesc</b> or <b>*tensorDesc</b> is a null pointer, this method will return error code and does not execute the release. \n
401  *
402  * @param tensorDesc Double pointer to the {@link NN_TensorDesc} instance.
403  * @return Execution result of the function.
404  *         {@link OH_NN_SUCCESS} destroy tensor description successfully.\n
405  *         {@link OH_NN_INVALID_PARAMETER} fail to destroy tensor description. The possible reason for failure
406  *         is that the <b>tensorDesc</b> or <b>*tensorDesc</b> is nullptr.\n
407  * @since 11
408  * @version 1.0
409  */
410 OH_NN_ReturnCode OH_NNTensorDesc_Destroy(NN_TensorDesc **tensorDesc);
411 
412 /**
413  * @brief Sets the name of a {@link NN_TensorDesc}.
414  *
415  * After the {@link NN_TensorDesc} instance is created, call this method to set the tensor name.
416  * The value of <b>*name</b> is a C-style string ended with <b>'\0'</b>.\n
417  *
418  * if <b>tensorDesc</b> or <b>name</b> is a null pointer, this method will return error code.\n
419  *
420  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
421  * @param name The name of the tensor that needs to be set.
422  * @return Execution result of the function.
423  *         {@link OH_NN_SUCCESS} set tensor name successfully.\n
424  *         {@link OH_NN_INVALID_PARAMETER} fail to set tensor name. The possible reason for failure
425  *         is that the <b>tensorDesc</b> or <b>name</b> is nullptr.\n
426  * @since 11
427  * @version 1.0
428  */
429 OH_NN_ReturnCode OH_NNTensorDesc_SetName(NN_TensorDesc *tensorDesc, const char *name);
430 
431 /**
432  * @brief Gets the name of a {@link NN_TensorDesc}.
433  *
434  * Call this method to obtain the name of the specified {@link NN_TensorDesc} instance.
435  * The value of <b>*name</b> is a C-style string ended with <b>'\0'</b>.\n
436  *
437  * if <b>tensorDesc</b> or <b>name</b> is a null pointer, this method will return error code.
438  * As an output parameter, <b>*name</b> must be a null pointer, otherwise the method will return an error code.
 439  * For example, you should define char* tensorName = NULL, and pass &tensorName as the argument of <b>name</b>.\n
440  *
 441  * You do not need to release the memory of <b>name</b>. It will be released when <b>tensorDesc</b> is destroyed.\n
442  *
443  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
 444  * @param name The returned name of the tensor.
445  * @return Execution result of the function.
446  *         {@link OH_NN_SUCCESS} get tensor name successfully.\n
447  *         {@link OH_NN_INVALID_PARAMETER} fail to get tensor name. The possible reason for failure
448  *         is that the <b>tensorDesc</b> or <b>name</b> is nullptr, or <b>*name</b> is not nullptr.\n
449  * @since 11
450  * @version 1.0
451  */
452 OH_NN_ReturnCode OH_NNTensorDesc_GetName(const NN_TensorDesc *tensorDesc, const char **name);
453 
454 /**
455  * @brief Sets the data type of a {@link NN_TensorDesc}.
456  *
457  * After the {@link NN_TensorDesc} instance is created, call this method to set the tensor data type. \n
458  *
459  * if <b>tensorDesc</b> is a null pointer, this method will return error code. \n
460  *
461  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
462  * @param dataType The data type of the tensor that needs to be set.
463  * @return Execution result of the function.
464  *         {@link OH_NN_SUCCESS} set tensor data type successfully.\n
465  *         {@link OH_NN_INVALID_PARAMETER} fail to set tensor data type. The possible reason for failure
466  *         is that the <b>tensorDesc</b> is nullptr, or <b>dataType</b> is invalid.\n
467  * @since 11
468  * @version 1.0
469  */
470 OH_NN_ReturnCode OH_NNTensorDesc_SetDataType(NN_TensorDesc *tensorDesc, OH_NN_DataType dataType);
471 
472 /**
473  * @brief Gets the data type of a {@link NN_TensorDesc}.
474  *
475  * Call this method to obtain the data type of the specified {@link NN_TensorDesc} instance. \n
476  *
477  * if <b>tensorDesc</b> or <b>dataType</b> is a null pointer, this method will return error code. \n
478  *
479  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
480  * @param dataType The returned data type of the tensor.
481  * @return Execution result of the function.
482  *         {@link OH_NN_SUCCESS} get tensor data type successfully.\n
483  *         {@link OH_NN_INVALID_PARAMETER} fail to get tensor data type. The possible reason for failure
484  *         is that the <b>tensorDesc</b> or <b>dataType</b> is nullptr.\n
485  * @since 11
486  * @version 1.0
487  */
488 OH_NN_ReturnCode OH_NNTensorDesc_GetDataType(const NN_TensorDesc *tensorDesc, OH_NN_DataType *dataType);
489 
490 /**
491  * @brief Sets the shape of a {@link NN_TensorDesc}.
492  *
493  * After the {@link NN_TensorDesc} instance is created, call this method to set the tensor shape. \n
494  *
495  * if <b>tensorDesc</b> or <b>shape</b> is a null pointer, or <b>shapeLength</b> is 0, this method will return error code. \n
496  *
497  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
498  * @param shape The shape list of the tensor that needs to be set.
499  * @param shapeLength The length of the shape list that needs to be set.
500  * @return Execution result of the function.
501  *         {@link OH_NN_SUCCESS} set tensor shape successfully.\n
502  *         {@link OH_NN_INVALID_PARAMETER} fail to set tensor shape. The possible reason for failure
503  *         is that the <b>tensorDesc</b> or <b>shape</b> is nullptr, or <b>shapeLength</b> is 0.\n
504  * @since 11
505  * @version 1.0
506  */
507 OH_NN_ReturnCode OH_NNTensorDesc_SetShape(NN_TensorDesc *tensorDesc, const int32_t *shape, size_t shapeLength);
508 
509 /**
510  * @brief Gets the shape of a {@link NN_TensorDesc}.
511  *
512  * Call this method to obtain the shape of the specified {@link NN_TensorDesc} instance. \n
513  *
514  * if <b>tensorDesc</b>, <b>shape</b> or <b>shapeLength</b> is a null pointer, this method will return error code.
515  * As an output parameter, <b>*shape</b> must be a null pointer, otherwise the method will return an error code.
 516  * For example, you should define int32_t* tensorShape = NULL, and pass &tensorShape as the argument of <b>shape</b>. \n
517  *
 518  * You do not need to release the memory of <b>shape</b>. It will be released when <b>tensorDesc</b> is destroyed. \n
519  *
520  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
521  * @param shape Return the shape list of the tensor.
522  * @param shapeLength The returned length of the shape list.
523  * @return Execution result of the function.
524  *         {@link OH_NN_SUCCESS} get tensor shape successfully.\n
525  *         {@link OH_NN_INVALID_PARAMETER} fail to get tensor shape. The possible reason for failure is that the
526  *         <b>tensorDesc</b>, <b>shape</b> or <b>shapeLength</b> is nullptr, or <b>*shape</b> is not nullptr.\n
527  * @since 11
528  * @version 1.0
529  */
530 OH_NN_ReturnCode OH_NNTensorDesc_GetShape(const NN_TensorDesc *tensorDesc, int32_t **shape, size_t *shapeLength);
531 
532 /**
533  * @brief Sets the format of a {@link NN_TensorDesc}.
534  *
535  * After the {@link NN_TensorDesc} instance is created, call this method to set the tensor format. \n
536  *
537  * if <b>tensorDesc</b> is a null pointer, this method will return error code. \n
538  *
539  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
540  * @param format The format of the tensor that needs to be set.
541  * @return Execution result of the function.
542  *         {@link OH_NN_SUCCESS} set tensor format successfully.\n
543  *         {@link OH_NN_INVALID_PARAMETER} fail to set tensor format. The possible reason for failure
544  *         is that the <b>tensorDesc</b> is nullptr, or <b>format</b> is invalid.\n
545  * @since 11
546  * @version 1.0
547  */
548 OH_NN_ReturnCode OH_NNTensorDesc_SetFormat(NN_TensorDesc *tensorDesc, OH_NN_Format format);
549 
550 /**
551  * @brief Gets the format of a {@link NN_TensorDesc}.
552  *
553  * Call this method to obtain the format of the specified {@link NN_TensorDesc} instance. \n
554  *
555  * if <b>tensorDesc</b> or <b>format</b> is a null pointer, this method will return error code. \n
556  *
557  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
558  * @param format The returned format of the tensor.
559  * @return Execution result of the function.
560  *         {@link OH_NN_SUCCESS} get tensor format successfully.\n
561  *         {@link OH_NN_INVALID_PARAMETER} fail to get tensor format. The possible reason for failure
562  *         is that the <b>tensorDesc</b> or <b>format</b> is nullptr.\n
563  * @since 11
564  * @version 1.0
565  */
566 OH_NN_ReturnCode OH_NNTensorDesc_GetFormat(const NN_TensorDesc *tensorDesc, OH_NN_Format *format);
567 
568 /**
569  * @brief Gets the element count of a {@link NN_TensorDesc}.
570  *
571  * Call this method to obtain the element count of the specified {@link NN_TensorDesc} instance.
572  * If you need to obtain byte size of the tensor data, call {@link OH_NNTensorDesc_GetByteSize}. \n
573  *
574  * If the tensor shape is dynamic, this method will return error code, and <b>elementCount</b> will be 0. \n
575  *
576  * if <b>tensorDesc</b> or <b>elementCount</b> is a null pointer, this method will return error code. \n
577  *
578  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
579  * @param elementCount The returned element count of the tensor.
580  * @return Execution result of the function.
581  *         {@link OH_NN_SUCCESS} get tensor element count successfully.\n
582  *         {@link OH_NN_INVALID_PARAMETER} fail to get tensor element count. The possible reason for failure
583  *         is that the <b>tensorDesc</b> or <b>elementCount</b> is nullptr.\n
584  *         {@link OH_NN_DYNAMIC_SHAPE} dim is less than zero.\n
585  * @since 11
586  * @version 1.0
587  */
588 OH_NN_ReturnCode OH_NNTensorDesc_GetElementCount(const NN_TensorDesc *tensorDesc, size_t *elementCount);
589 
590 /**
591  * @brief Gets the byte size of a {@link NN_TensorDesc}.
592  *
593  * Call this method to obtain the byte size of the specified {@link NN_TensorDesc} instance. \n
594  *
595  * If the tensor shape is dynamic, this method will return error code, and <b>byteSize</b> will be 0. \n
596  *
597  * If you need to obtain element count of the tensor data, call {@link OH_NNTensorDesc_GetElementCount}. \n
598  *
599  * if <b>tensorDesc</b> or <b>byteSize</b> is a null pointer, this method will return error code. \n
600  *
601  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
602  * @param byteSize The returned byte size of the tensor.
603  * @return Execution result of the function.
604  *         {@link OH_NN_SUCCESS} get tensor byte size successfully.\n
605  *         {@link OH_NN_INVALID_PARAMETER} fail to get tensor byte size. The possible reason for failure
606  *         is that the <b>tensorDesc</b> or <b>byteSize</b> is nullptr, or tensor data type is invalid.\n
607  *         {@link OH_NN_DYNAMIC_SHAPE} dim is less than zero.\n
608  * @since 11
609  * @version 1.0
610  */
611 OH_NN_ReturnCode OH_NNTensorDesc_GetByteSize(const NN_TensorDesc *tensorDesc, size_t *byteSize);
612 
613 /**
614  * @brief Creates a {@link NN_Tensor} instance from {@link NN_TensorDesc}.
615  *
 * This method uses {@link OH_NNTensorDesc_GetByteSize} to calculate the byte size of tensor data and allocates shared
 * memory on device for it. The device driver will get the tensor data directly by the "zero-copy" way.\n
618  *
619  * Note that this method will copy the <b>tensorDesc</b> into {@link NN_Tensor}. Therefore you should destroy
620  * <b>tensorDesc</b> by {@link OH_NNTensorDesc_Destroy} if it is no longer used.\n
621  *
622  * If the tensor shape is dynamic, this method will return error code.\n
623  *
624  * <b>deviceID</b> indicates the selected device. If it is 0, the first device in the current device list will be used
625  * by default.\n
626  *
627  * <b>tensorDesc</b> must be provided, and this method will return an error code if it is a null pointer.\n
628  *
629  * Call {@link OH_NNTensor_Destroy} to release the {@link NN_Tensor} instance if it is no longer used.\n
630  *
631  * @param deviceID Device id. If it is 0, the first device in the current device list will be used by default.
632  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
633  * @return Pointer to a {@link NN_Tensor} instance, or NULL if it fails to create. The possible reason for failure
634  *         is that the <b>tensorDesc</b> is nullptr, or <b>deviceID</b> is invalid, or memory error occurred.
635  * @since 11
636  * @version 1.0
637  */
638 NN_Tensor *OH_NNTensor_Create(size_t deviceID, NN_TensorDesc *tensorDesc);
639 
640 /**
641  * @brief Creates a {@link NN_Tensor} instance with specified size and {@link NN_TensorDesc}.
642  *
 * This method uses <b>size</b> as the byte size of tensor data and allocates shared memory on device for it.
 * The device driver will get the tensor data directly by the "zero-copy" way.\n
645  *
646  * Note that this method will copy the <b>tensorDesc</b> into {@link NN_Tensor}. Therefore you should destroy
647  * <b>tensorDesc</b> by {@link OH_NNTensorDesc_Destroy} if it is no longer used.\n
648  *
649  * <b>deviceID</b> indicates the selected device. If it is 0, the first device in the current device list will be used
650  * by default.\n
651  *
652  * <b>tensorDesc</b> must be provided, if it is a null pointer, the method returns an error code.
653  * <b>size</b> must be no less than the byte size of tensorDesc. Otherwise, this method will return an error code.
654  * If the tensor shape is dynamic, the <b>size</b> will not be checked.\n
655  *
656  * Call {@link OH_NNTensor_Destroy} to release the {@link NN_Tensor} instance if it is no longer used.\n
657  *
658  * @param deviceID Device id. If it is 0, the first device in the current device list will be used by default.
659  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
660  * @param size Size of tensor data that need to be allocated.
661  * @return Pointer to a {@link NN_Tensor} instance, or NULL if it fails to create. The possible reason for failure
662  *         is that the <b>tensorDesc</b> is nullptr, or <b>deviceID</b> or size is invalid, or memory error occurred.
663  * @since 11
664  * @version 1.0
665  */
666 NN_Tensor *OH_NNTensor_CreateWithSize(size_t deviceID, NN_TensorDesc *tensorDesc, size_t size);
667 
668 /**
669  * @brief Creates a {@link NN_Tensor} instance with specified file descriptor and {@link NN_TensorDesc}.
670  *
 * This method reuses the shared memory corresponding to the file descriptor <b>fd</b> passed. It may come from another
672  * {@link NN_Tensor} instance. When you call the {@link OH_NNTensor_Destroy} method to release the tensor created by
673  * this method, the tensor data memory will not be released.\n
674  *
675  * Note that this method will copy the <b>tensorDesc</b> into {@link NN_Tensor}. Therefore you should destroy
 * <b>tensorDesc</b> by {@link OH_NNTensorDesc_Destroy} if it is no longer used.\n
677  *
678  * <b>deviceID</b> indicates the selected device. If it is 0, the first device in the current device list will be used
679  * by default.\n
680  *
681  * <b>tensorDesc</b> must be provided, if it is a null pointer, the method returns an error code.\n
682  *
683  * Call {@link OH_NNTensor_Destroy} to release the {@link NN_Tensor} instance if it is no longer used.\n
684  *
685  * @param deviceID Device id. If it is 0, the first device in the current device list will be used by default.
686  * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance.
 * @param fd File descriptor of the shared memory to be reused.
 * @param size Size of the shared memory to be reused.
 * @param offset Offset of the shared memory to be reused.
 * @return Pointer to a {@link NN_Tensor} instance, or NULL if it fails to create. The possible reason for failure
691  *         is that the <b>tensorDesc</b> is nullptr, or <b>deviceID</b>, <b>fd</b>, <b>size</b> or <b>offset</b> is
692  *         invalid, or memory error occurred.
693  * @since 11
694  * @version 1.0
695  */
696 NN_Tensor *OH_NNTensor_CreateWithFd(size_t deviceID,
697                                     NN_TensorDesc *tensorDesc,
698                                     int fd,
699                                     size_t size,
700                                     size_t offset);
701 
702 /**
703  * @brief Releases a {@link NN_Tensor} instance.
704  *
705  * When the {@link NN_Tensor} instance is no longer used, this method needs to be called to release the instance.
706  * Otherwise, the memory leak will occur.\n
707  *
708  * If <b>tensor</b> or <b>*tensor</b> is a null pointer, this method will return error code and does not execute the
709  * release.\n
710  *
711  * @param tensor Double pointer to the {@link NN_Tensor} instance.
712  * @return Execution result of the function.
713  *         {@link OH_NN_SUCCESS} destroy tensor successfully.\n
714  *         {@link OH_NN_INVALID_PARAMETER} fail to destroy tensor. The possible reason for failure
715  *         is that the <b>tensor</b> is nullptr, or <b>*tensor</b> is not nullptr.\n
716  * @since 11
717  * @version 1.0
718  */
719 OH_NN_ReturnCode OH_NNTensor_Destroy(NN_Tensor **tensor);
720 
721 /**
722  * @brief Gets the {@link NN_TensorDesc} instance of a {@link NN_Tensor}.
723  *
724  * Call this method to obtain the inner {@link NN_TensorDesc} instance pointer of the specified {@link NN_Tensor}
725  * instance. You can get various types of the tensor attributes such as name/format/data type/shape from the returned
726  * {@link NN_TensorDesc} instance.\n
727  *
 * You should not destroy the returned {@link NN_TensorDesc} instance because it points to the inner instance of
 * {@link NN_Tensor}. Otherwise, a memory corruption of double free will occur when {@link OH_NNTensor_Destroy}
730  * is called.\n
731  *
732  * if <b>tensor</b> is a null pointer, this method will return null pointer.\n
733  *
734  * @param tensor Pointer to the {@link NN_Tensor} instance.
735  * @return Pointer to the {@link NN_TensorDesc} instance, or NULL if it fails to create. The possible reason for
736  *         failure is that the <b>tensor</b> is nullptr, or <b>tensor</b> is invalid.
737  * @since 11
738  * @version 1.0
739  */
740 NN_TensorDesc *OH_NNTensor_GetTensorDesc(const NN_Tensor *tensor);
741 
742 /**
743  * @brief Gets the data buffer of a {@link NN_Tensor}.
744  *
745  * You can read/write data from/to the tensor data buffer. The buffer is mapped from a shared memory on device,
 * so the device driver will get the tensor data directly by this "zero-copy" way.\n
747  *
748  * Note that the real tensor data only uses the segment [offset, size) of the shared memory. The offset can be got by
749  * {@link OH_NNTensor_GetOffset} and the size can be got by {@link OH_NNTensor_GetSize}.\n
750  *
751  * if <b>tensor</b> is a null pointer, this method will return null pointer.\n
752  *
753  * @param tensor Pointer to the {@link NN_Tensor} instance.
754  * @return Pointer to data buffer of the tensor, or NULL if it fails to create. The possible reason for failure
755  *         is that the <b>tensor</b> is nullptr, or <b>tensor</b> is invalid.
756  * @since 11
757  * @version 1.0
758  */
759 void *OH_NNTensor_GetDataBuffer(const NN_Tensor *tensor);
760 
761 /**
762  * @brief Gets the file descriptor of the shared memory of a {@link NN_Tensor}.
763  *
 * The file descriptor <b>fd</b> corresponds to the shared memory of the tensor data, and can be reused
765  * by another {@link NN_Tensor} through {@link OH_NNTensor_CreateWithFd}.\n
766  *
767  * if <b>tensor</b> or <b>fd</b> is a null pointer, this method will return error code.\n
768  *
769  * @param tensor Pointer to the {@link NN_Tensor} instance.
770  * @param fd The returned file descriptor of the shared memory.
771  * @return Execution result of the function.
772  *         {@link OH_NN_SUCCESS} get tensor fd successfully. The return value is saved in parameter fd.\n
773  *         {@link OH_NN_INVALID_PARAMETER} fail to get tensor fd. The possible reason for failure
774  *         is that the <b>tensor</b> or <b>fd</b> is nullptr.\n
775  * @since 11
776  * @version 1.0
777  */
778 OH_NN_ReturnCode OH_NNTensor_GetFd(const NN_Tensor *tensor, int *fd);
779 
780 /**
781  * @brief Gets the size of the shared memory of a {@link NN_Tensor}.
782  *
 * The <b>size</b> corresponds to the shared memory of the tensor data, and can be reused by another {@link NN_Tensor}
784  * through {@link OH_NNTensor_CreateWithFd}.\n
785  *
786  * The <b>size</b> is as same as the argument <b>size</b> of {@link OH_NNTensor_CreateWithSize} and
787  * {@link OH_NNTensor_CreateWithFd}. But for a tensor created by {@link OH_NNTensor_Create},
788  * it equals to the tensor byte size.\n
789  *
790  * Note that the real tensor data only uses the segment [offset, size) of the shared memory. The offset can be got by
791  * {@link OH_NNTensor_GetOffset} and the size can be got by {@link OH_NNTensor_GetSize}.\n
792  *
793  * if <b>tensor</b> or <b>size</b> is a null pointer, this method will return error code.\n
794  *
795  * @param tensor Pointer to the {@link NN_Tensor} instance.
796  * @param size The returned size of tensor data.
797  * @return Execution result of the function.
798  *         {@link OH_NN_SUCCESS} get tensor size successfully. The return value is saved in <b>size</b>.\n
799  *         {@link OH_NN_INVALID_PARAMETER} fail to get tensor size. The possible reason for failure
800  *         is that the <b>tensor</b> or <b>size</b> is nullptr.\n
801  * @since 11
802  * @version 1.0
803  */
804 OH_NN_ReturnCode OH_NNTensor_GetSize(const NN_Tensor *tensor, size_t *size);
805 
806 /**
807  * @brief Get the data offset of a tensor.
808  *
 * The <b>offset</b> corresponds to the shared memory of the tensor data, and can be reused by another {@link NN_Tensor}
810  * through {@link OH_NNTensor_CreateWithFd}.\n
811  *
812  * Note that the real tensor data only uses the segment [offset, size) of the shared memory. The offset can be got by
813  * {@link OH_NNTensor_GetOffset} and the size can be got by {@link OH_NNTensor_GetSize}.\n
814  *
815  * if <b>tensor</b> or <b>offset</b> is a null pointer, this method will return error code.\n
816  *
817  * @param tensor Pointer to the {@link NN_Tensor} instance.
818  * @param offset The returned offset of tensor data.
819  * @return Execution result of the function.
820  *         {@link OH_NN_SUCCESS} get tensor offset successfully. The return value is saved in <b>offset</b>.\n
821  *         {@link OH_NN_INVALID_PARAMETER} fail to get tensor offset. The possible reason for failure
822  *         is that the <b>tensor</b> or <b>offset</b> is nullptr.\n
823  * @since 11
824  * @version 1.0
825  */
826 OH_NN_ReturnCode OH_NNTensor_GetOffset(const NN_Tensor *tensor, size_t *offset);
827 
828 /**
829  * @brief Creates an executor instance of the {@link OH_NNExecutor} type.
830  *
831  * This method constructs a model inference executor associated with the device based on the passed compilation. \n
832  *
833  * After the {@link OH_NNExecutor} instance is created, you can release the {@link OH_NNCompilation}
834  * instance if you do not need to create any other executors. \n
835  *
836  * @param compilation Pointer to the {@link OH_NNCompilation} instance.
837  * @return Pointer to a {@link OH_NNExecutor} instance, or NULL if it fails to create. The possible reason for failure
838  *         is that the <b>compilation</b> is nullptr, or memory error occurred.
839  * @since 9
840  * @version 1.0
841  */
842 OH_NNExecutor *OH_NNExecutor_Construct(OH_NNCompilation *compilation);
843 
844 /**
845  * @brief Obtains the dimension information about the output tensor.
846  *
847  * After {@link OH_NNExecutor_Run} is called to complete a single inference, call this method to obtain the specified
848  * output dimension information and number of dimensions. It is commonly used in dynamic shape input and output
849  * scenarios.\n
850  *
851  * If the <b>outputIndex</b> is greater than or equal to the output tensor number, this method will return error code.
852  * The output tensor number can be got by {@link OH_NNExecutor_GetOutputCount}.\n
853  *
854  * As an output parameter, <b>*shape</b> must be a null pointer, otherwise the method will return an error code.
 * For example, you should define int32_t* tensorShape = NULL, and pass &tensorShape as the argument of <b>shape</b>.\n
856  *
 * You do not need to release the memory of <b>shape</b>. It will be released when <b>executor</b> is destroyed.\n
858  *
859  * @param executor Pointer to the {@link OH_NNExecutor} instance.
860  * @param outputIndex Output Index value, which is in the same sequence of the data output when
861  *                    {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
862  *                    Assume that <b>outputIndices</b> is <b>{4, 6, 8}</b> when
863  *                    {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
864  *                    When {@link OH_NNExecutor_GetOutputShape} is called to obtain dimension information about
865  *                    the output tensor, <b>outputIndices</b> is <b>{0, 1, 2}</b>.
866  * @param shape Pointer to the int32_t array. The value of each element in the array is the length of the output tensor
867  *              in each dimension.
868  * @param shapeLength Pointer to the uint32_t type. The number of output dimensions is returned.
869  * @return Execution result of the function.
870  *         {@link OH_NN_SUCCESS} get tensor output shape successfully. The return value is saved in
871  *         <b>shape</b> and <b>shapeLength</b>.\n
872  *         {@link OH_NN_INVALID_PARAMETER} fail to get tensor output shape. The possible reason for failure is that
873  *         the <b>executor</b>, <b>shape</b> or <b>shapeLength</b> is nullptr, or <b>*shape</b> is not nullptr,
874  *         or <b>outputIndex</b> is out of range.\n
875  * @since 9
876  * @version 1.0
877  */
878 OH_NN_ReturnCode OH_NNExecutor_GetOutputShape(OH_NNExecutor *executor,
879                                               uint32_t outputIndex,
880                                               int32_t **shape,
881                                               uint32_t *shapeLength);
882 
883 /**
884  * @brief Destroys an executor instance to release the memory occupied by the executor.
885  *
886  * This method needs to be called to release the executor instance created by calling {@link OH_NNExecutor_Construct}. Otherwise,
887  * the memory leak will occur. \n
888  *
889  * If <b>executor</b> or <b>*executor</b> is a null pointer, this method only prints warning logs and does not execute the release. \n
890  *
891  * @param executor Double pointer to the {@link OH_NNExecutor} instance.
892  * @since 9
893  * @version 1.0
894  */
895 void OH_NNExecutor_Destroy(OH_NNExecutor **executor);
896 
897 /**
898  * @brief Gets the input tensor count.
899  *
900  * You can get the input tensor count from the executor, and then create an input tensor descriptor with its index by
901  * {@link OH_NNExecutor_CreateInputTensorDesc}. \n
902  *
903  * @param executor Pointer to the {@link OH_NNExecutor} instance.
904  * @param inputCount Input tensor count returned.
905  * @return Execution result of the function.
906  *         {@link OH_NN_SUCCESS} get input count successfully. The return value is saved in <b>inputCount</b>.\n
907  *         {@link OH_NN_INVALID_PARAMETER} fail to get input count. The possible reason for failure is that
908  *         the <b>executor</b> or <b>inputCount</b> is nullptr.\n
909  * @since 11
910  * @version 1.0
911  */
912 OH_NN_ReturnCode OH_NNExecutor_GetInputCount(const OH_NNExecutor *executor, size_t *inputCount);
913 
914 /**
915  * @brief Gets the output tensor count.
916  *
917  * You can get the output tensor count from the executor, and then create an output tensor descriptor with its index by
918  * {@link OH_NNExecutor_CreateOutputTensorDesc}. \n
919  *
920  * @param executor Pointer to the {@link OH_NNExecutor} instance.
921  * @param outputCount Output tensor count returned.
922  * @return Execution result of the function.
923  *         {@link OH_NN_SUCCESS} get output count successfully. The return value is saved in <b>outputCount</b>.\n
924  *         {@link OH_NN_INVALID_PARAMETER} fail to get output count. The possible reason for failure is that
925  *         the <b>executor</b> or <b>outputCount</b> is nullptr.\n
926  * @since 11
927  * @version 1.0
928  */
929 OH_NN_ReturnCode OH_NNExecutor_GetOutputCount(const OH_NNExecutor *executor, size_t *outputCount);
930 
931 /**
932  * @brief Creates an input tensor descriptor with its index.
933  *
934  * The input tensor descriptor contains all attributes of the input tensor.
935  * If the <b>index</b> is greater than or equal to the input tensor number, this method will return error code.
936  * The input tensor number can be got by {@link OH_NNExecutor_GetInputCount}.\n
937  *
938  * @param executor Pointer to the {@link OH_NNExecutor} instance.
939  * @param index Input tensor index.
940  * @return Pointer to {@link NN_TensorDesc} instance, or NULL if it fails to create. The possible reason for
941  *         failure is that the <b>executor</b> is nullptr, or <b>index</b> is out of range.
942  * @since 11
943  * @version 1.0
944  */
945 NN_TensorDesc *OH_NNExecutor_CreateInputTensorDesc(const OH_NNExecutor *executor, size_t index);
946 
947 /**
948  * @brief Creates an output tensor descriptor with its index.
949  *
950  * The output tensor descriptor contains all attributes of the output tensor.
951  * If the <b>index</b> is greater than or equal to the output tensor number, this method will return error code.
952  * The output tensor number can be got by {@link OH_NNExecutor_GetOutputCount}.\n
953  *
954  * @param executor Pointer to the {@link OH_NNExecutor} instance.
955  * @param index Output tensor index.
956  * @return Pointer to {@link NN_TensorDesc} instance, or NULL if it fails to create. The possible reason for
957  *         failure is that the <b>executor</b> is nullptr, or <b>index</b> is out of range.
958  * @since 11
959  * @version 1.0
960  */
961 NN_TensorDesc *OH_NNExecutor_CreateOutputTensorDesc(const OH_NNExecutor *executor, size_t index);
962 
963 /**
964  * @brief Gets the dimension ranges of an input tensor.
965  *
966  * The supported dimension ranges of an input tensor with dynamic shape may be different among various devices.
967  * You can call this method to get the dimension ranges of the input tensor supported by the device.
 * <b>*minInputDims</b> contains the minimum dimensions of the input tensor, and <b>*maxInputDims</b> contains the
969  * maximum, e.g. if an input tensor has dynamic shape [-1, -1, -1, 3], its <b>*minInputDims</b> may be [1, 10, 10, 3]
970  * and <b>*maxInputDims</b> may be [100, 1024, 1024, 3] on the device.\n
971  *
972  * If the <b>index</b> is greater than or equal to the input tensor number, this method will return error code.
973  * The input tensor number can be got by {@link OH_NNExecutor_GetInputCount}.\n
974  *
975  * As an output parameter, <b>*minInputDims</b> or <b>*maxInputDims</b> must be a null pointer, otherwise the method
976  * will return an error code. For example, you should define int32_t* minInDims = NULL, and pass &minInDims as the
977  * argument of <b>minInputDims</b>.\n
978  *
979  * You do not need to release the memory of <b>*minInputDims</b> or <b>*maxInputDims</b>.
 * It will be released when <b>executor</b> is destroyed.\n
981  *
982  * @param executor Pointer to the {@link OH_NNExecutor} instance.
983  * @param index Input tensor index.
984  * @param minInputDims Returned pointer to an array contains the minimum dimensions of the input tensor.
985  * @param maxInputDims Returned pointer to an array contains the maximum dimensions of the input tensor.
986  * @param shapeLength Returned length of the shape of input tensor.
987  * @return Execution result of the function.
988  *         {@link OH_NN_SUCCESS} get input dim range successfully. The return value is saved in <b>minInputDims</b>,
989  *         <b>maxInputDims</b> and <b>shapeLength</b>.\n
990  *         {@link OH_NN_INVALID_PARAMETER} fail to get input dim range. The possible reason for failure is that
991  *         the <b>executor</b>, <b>minInputDims</b>, <b>maxInputDims</b> or <b>shapeLength</b> is nullptr, or
992  *         <b>*minInputDims</b> or <b>*maxInputDims</b> is not nullptr, or <b>index</b> is out of range.\n
993  *         {@link OH_NN_OPERATION_FORBIDDEN} the backend device is not supported to get input dim range.\n
994  * @since 11
995  * @version 1.0
996  */
997 OH_NN_ReturnCode OH_NNExecutor_GetInputDimRange(const OH_NNExecutor *executor,
998                                                 size_t index,
999                                                 size_t **minInputDims,
1000                                                 size_t **maxInputDims,
1001                                                 size_t *shapeLength);
1002 
1003 /**
1004  * @brief Sets the callback function handle for the post-process when the asynchronous execution has been done.
1005  *
 * The definition of the callback function: {@link NN_OnRunDone}. \n
1007  *
1008  * @param executor Pointer to the {@link OH_NNExecutor} instance.
1009  * @param onRunDone Callback function handle {@link NN_OnRunDone}.
1010  * @return Execution result of the function.
1011  *         {@link OH_NN_SUCCESS} set on run done successfully.\n
1012  *         {@link OH_NN_INVALID_PARAMETER} fail to set on run done. The possible reason for failure is that
1013  *         the <b>executor</b> or <b>onRunDone</b> is nullptr.\n
1014  *         {@link OH_NN_OPERATION_FORBIDDEN} the backend device is not supported to set on run done.\n
1015  * @since 11
1016  * @version 1.0
1017  */
1018 OH_NN_ReturnCode OH_NNExecutor_SetOnRunDone(OH_NNExecutor *executor, NN_OnRunDone onRunDone);
1019 
1020 /**
1021  * @brief Sets the callback function handle for the post-process when the device driver service is dead during asynchronous execution.
1022  *
 * The definition of the callback function: {@link NN_OnServiceDied}. \n
1024  *
1025  * @param executor Pointer to the {@link OH_NNExecutor} instance.
1026  * @param onServiceDied Callback function handle {@link NN_OnServiceDied}.
1027  * @return Execution result of the function.
1028  *         {@link OH_NN_SUCCESS} set on service died successfully.\n
1029  *         {@link OH_NN_INVALID_PARAMETER} fail to set on service died. The possible reason for failure is that
1030  *         the <b>executor</b> or <b>onServiceDied</b> is nullptr.\n
1031  *         {@link OH_NN_OPERATION_FORBIDDEN} the backend device is not supported to set on service died.\n
1032  * @since 11
1033  * @version 1.0
1034  */
1035 OH_NN_ReturnCode OH_NNExecutor_SetOnServiceDied(OH_NNExecutor *executor, NN_OnServiceDied onServiceDied);
1036 
1037 /**
1038  * @brief Synchronous execution of the model inference.
1039  *
1040  * Input and output tensors should be created first by {@link OH_NNTensor_Create}, {@link OH_NNTensor_CreateWithSize} or
1041  * {@link OH_NNTensor_CreateWithFd}. And then the input tensors data which is got by {@link OH_NNTensor_GetDataBuffer} must be filled.
1042  * The executor will then yield out the results by inference execution and fill them into output tensors data for you to read. \n
1043  *
1044  * In the case of dynamic shape, you can get the real output shape directly by {@link OH_NNExecutor_GetOutputShape}, or you
1045  * can create a tensor descriptor from an output tensor by {@link OH_NNTensor_GetTensorDesc}, and then read its real shape
1046  * by {@link OH_NNTensorDesc_GetShape}. \n
1047  *
1048  * @param executor Pointer to the {@link OH_NNExecutor} instance.
1049  * @param inputTensor An array of input tensors {@link NN_Tensor}.
1050  * @param inputCount Number of input tensors.
1051  * @param outputTensor An array of output tensors {@link NN_Tensor}.
1052  * @param outputCount Number of output tensors.
1053  * @return Execution result of the function.
1054  *         {@link OH_NN_SUCCESS} run successfully.\n
1055  *         {@link OH_NN_INVALID_PARAMETER} fail to run. The possible reason for failure is that the <b>executor</b>,
1056  *         <b>inputTensor</b> or <b>outputTensor</b> is nullptr, or <b>inputCount</b> or <b>outputCount</b> is 0.\n
1057  *         {@link OH_NN_FAILED} the backend device failed to run.\n
1058  *         {@link OH_NN_NULL_PTR} the parameters of input or output tensor is invalid.\n
1059  * @since 11
1060  * @version 1.0
1061  */
1062 OH_NN_ReturnCode OH_NNExecutor_RunSync(OH_NNExecutor *executor,
1063                                        NN_Tensor *inputTensor[],
1064                                        size_t inputCount,
1065                                        NN_Tensor *outputTensor[],
1066                                        size_t outputCount);
1067 
1068 /**
1069  * @brief Asynchronous execution of the model inference.
1070  *
1071  * Input and output tensors should be created first by {@link OH_NNTensor_Create}, {@link OH_NNTensor_CreateWithSize} or
1072  * {@link OH_NNTensor_CreateWithFd}. And then the input tensors data which is got by {@link OH_NNTensor_GetDataBuffer}
1073  * must be filled. The executor will yield out the results by inference execution and fill them into output tensors data
1074  * for you to read.\n
1075  *
1076  * In the case of dynamic shape, you can get the real output shape directly by {@link OH_NNExecutor_GetOutputShape}, or
1077  * you can create a tensor descriptor from an output tensor by {@link OH_NNTensor_GetTensorDesc}, and then read its real
1078  * shape by {@link OH_NNTensorDesc_GetShape}.\n
1079  *
1080  * The method is non-blocked and will return immediately.\n
1081  *
1082  * The callback function handles are set by {@link OH_NNExecutor_SetOnRunDone}
1083  * and {@link OH_NNExecutor_SetOnServiceDied}. The inference results and error code can be got by
1084  * {@link NN_OnRunDone}. And you can deal with the abnormal termination of device driver service during
1085  * asynchronous execution by {@link NN_OnServiceDied}.\n
1086  *
1087  * If the execution time reaches the <b>timeout</b>, the execution will be terminated
 * with no outputs, and the <b>errCode</b> returned in callback function {@link NN_OnRunDone} will be
1089  * {@link OH_NN_TIMEOUT}.\n
1090  *
1091  * The <b>userData</b> is asynchronous execution identifier and will be returned as the first parameter of the callback
1092  * function. You can input any value you want as long as it can identify different asynchronous executions.\n
1093  *
1094  * @param executor Pointer to the {@link OH_NNExecutor} instance.
1095  * @param inputTensor An array of input tensors {@link NN_Tensor}.
1096  * @param inputCount Number of input tensors.
1097  * @param outputTensor An array of output tensors {@link NN_Tensor}.
1098  * @param outputCount Number of output tensors.
1099  * @param timeout Time limit (millisecond) of the asynchronous execution, e.g. 1000.
1100  * @param userData Asynchronous execution identifier.
1101  * @return Execution result of the function.
1102  *         {@link OH_NN_SUCCESS} run successfully.\n
1103  *         {@link OH_NN_INVALID_PARAMETER} fail to run. The possible reason for failure is that the <b>executor</b>,
1104  *         <b>inputTensor</b>, <b>outputTensor</b> or <b>userData</b> is nullptr, or <b>inputCount</b> or
1105  *         <b>outputCount</b> is 0.\n
1106  *         {@link OH_NN_FAILED} the backend device failed to run.\n
1107  *         {@link OH_NN_NULL_PTR} the parameters of input or output tensor is invalid.\n
1108  *         {@link OH_NN_OPERATION_FORBIDDEN} the backend device is not supported to run async.\n
1109  * @since 11
1110  * @version 1.0
1111  */
1112 OH_NN_ReturnCode OH_NNExecutor_RunAsync(OH_NNExecutor *executor,
1113                                         NN_Tensor *inputTensor[],
1114                                         size_t inputCount,
1115                                         NN_Tensor *outputTensor[],
1116                                         size_t outputCount,
1117                                         int32_t timeout,
1118                                         void *userData);
1119 
1120 /**
1121  * @brief Obtains the IDs of all devices connected.
1122  *
 * Each device has a unique and fixed ID. This method returns device IDs on the current device through the size_t
 * array.\n
1125  *
1126  * Device IDs are returned through the size_t array. Each element of the array is the ID of a single device.\n
1127  *
1128  * The array memory is managed inside, so you do not need to care about it.
1129  * The data pointer is valid before this method is called next time.\n
1130  *
1131  * @param allDevicesID Pointer to the size_t array. The input <b>*allDevicesID</b> must be a null pointer.
1132  *                     Otherwise, {@link OH_NN_INVALID_PARAMETER} is returned.
1133  * @param deviceCount Pointer of the uint32_t type, which is used to return the length of <b>*allDevicesID</b>.
1134  * @return Execution result of the function.
1135  *         {@link OH_NN_SUCCESS} get all devices id successfully.\n
1136  *         {@link OH_NN_INVALID_PARAMETER} fail to get all devices id. The possible reason for failure is that
1137  *         the <b>allDevicesID</b> or <b>deviceCount</b> is nullptr, or <b>*allDevicesID</b> is not nullptr.\n
1138  * @since 9
1139  * @version 1.0
1140  */
1141 OH_NN_ReturnCode OH_NNDevice_GetAllDevicesID(const size_t **allDevicesID, uint32_t *deviceCount);
1142 
/**
 * @brief Obtains the name of the specified device.
 *
 * <b>deviceID</b> specifies the device whose name will be obtained. The device ID needs to be obtained by calling
 * {@link OH_NNDevice_GetAllDevicesID}.
 * If it is 0, the first device in the current device list will be used by default.\n
 *
 * The value of <b>*name</b> is a C-style string ended with <b>'\0'</b>. <b>*name</b> must be a null pointer.
 * Otherwise, {@link OH_NN_INVALID_PARAMETER} is returned.
 * For example, you should define char* deviceName = NULL, and pass &deviceName as the argument of <b>name</b>.\n
 *
 * @param deviceID Device ID. If it is 0, the first device in the current device list will be used by default.
 * @param name The device name returned.
 * @return Execution result of the function.
 *         {@link OH_NN_SUCCESS} get name of specific device successfully.\n
 *         {@link OH_NN_INVALID_PARAMETER} fail to get name of specific device. The possible reason for failure is that
 *         the <b>name</b> is nullptr or <b>*name</b> is not nullptr.\n
 *         {@link OH_NN_FAILED} fail to get name of specific device. The possible reason for failure is that
 *         the <b>deviceID</b> is invalid.\n
 * @since 9
 * @version 1.0
 */
OH_NN_ReturnCode OH_NNDevice_GetName(size_t deviceID, const char **name);
1166 
/**
 * @brief Obtains the type information of the specified device.
 *
 * <b>deviceID</b> specifies the device whose type will be obtained. If it is 0, the first device in the current device
 * list will be used. Currently the following device types are supported:
 * - <b>OH_NN_CPU</b>: CPU device.
 * - <b>OH_NN_GPU</b>: GPU device.
 * - <b>OH_NN_ACCELERATOR</b>: machine learning dedicated accelerator.
 * - <b>OH_NN_OTHERS</b>: other hardware types. \n
 *
 * @param deviceID Device ID. If it is 0, the first device in the current device list will be used by default.
 * @param deviceType The device type {@link OH_NN_DeviceType} returned. Must not be nullptr; otherwise
 *                   {@link OH_NN_INVALID_PARAMETER} is returned.
 * @return Execution result of the function.
 *         {@link OH_NN_SUCCESS} get type of specific device successfully.\n
 *         {@link OH_NN_INVALID_PARAMETER} fail to get type of specific device. The possible reason for failure is that
 *         the <b>deviceType</b> is nullptr.\n
 *         {@link OH_NN_FAILED} fail to get type of specific device. The possible reason for failure is that
 *         the <b>deviceID</b> is invalid.\n
 * @since 9
 * @version 1.0
 */
OH_NN_ReturnCode OH_NNDevice_GetType(size_t deviceID, OH_NN_DeviceType *deviceType);
1189 
1190 #ifdef __cplusplus
1191 }
1192 #endif // __cplusplus
1193 
1194 /** @} */
1195 #endif // NEURAL_NETWORK_CORE_H
1196