1 /*
2  * Copyright (c) 2022-2023 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 /**
17  * @addtogroup NeuralNetworkRuntime
18  * @{
19  *
20  * @brief Provides APIs of Neural Network Runtime for accelerating the model inference.
21  *
22  * @since 9
23  * @version 2.0
24  */
25 
26 /**
27  * @file neural_network_runtime.h
28  *
29  * @brief Defines the Neural Network Runtime APIs. The AI inference framework uses the Native APIs provided by Neural Network Runtime
30  *        to construct models.
31  *
32  * Note: Currently, the APIs of Neural Network Runtime do not support multi-thread calling. \n
33  *
34  * include "neural_network_runtime/neural_network_runtime.h"
35  * @library libneural_network_runtime.so
36  * @kit NeuralNetworkRuntimeKit
37  * @syscap SystemCapability.AI.NeuralNetworkRuntime
38  * @since 9
39  * @version 2.0
40  */
41 
42 #ifndef NEURAL_NETWORK_RUNTIME_H
43 #define NEURAL_NETWORK_RUNTIME_H
44 
45 #include "neural_network_runtime_type.h"
46 #include "neural_network_core.h"
47 
48 #ifdef __cplusplus
49 extern "C" {
50 #endif
51 
52 /**
53  * @brief Creates a {@link NN_QuantParam} instance.
54  *
55  * After the {@link NN_QuantParam} instance is created, call {@link OH_NNQuantParam_SetScales}, {@link OH_NNQuantParam_SetZeroPoints},
56  * {@link OH_NNQuantParam_SetNumBits} to set its attributes, and then call {@link OH_NNModel_SetTensorQuantParams} to set it
57  * to a tensor. After that you should destroy it by calling {@link OH_NNQuantParam_Destroy} to avoid memory leak. \n
58  *
59  * @return Pointer to a {@link NN_QuantParam} instance, or NULL if it fails to create.
60  * @since 11
61  * @version 1.0
62  */
63 NN_QuantParam *OH_NNQuantParam_Create();
64 
/**
 * @brief Sets the scales of the {@link NN_QuantParam} instance.
 *
 * The parameter <b>quantCount</b> is the number of quantization parameters of a tensor, e.g. the quantCount is the
 * channel count if the tensor is per-channel quantized.\n
 *
 * @param quantParams Pointer to the {@link NN_QuantParam} instance.
 * @param scales An array of scales for all quantization parameters of the tensor.
 * @param quantCount Number of quantization parameters of the tensor.
 * @return Execution result of the function.
 *         {@link OH_NN_SUCCESS} set scales of quant parameters successfully.\n
 *         {@link OH_NN_INVALID_PARAMETER} fail to set scales of quant parameters. The possible reason for failure
 *         is that the <b>quantParams</b> or <b>scales</b> is nullptr, or <b>quantCount</b> is 0.\n
 * @since 11
 * @version 1.0
 */
OH_NN_ReturnCode OH_NNQuantParam_SetScales(NN_QuantParam *quantParams, const double *scales, size_t quantCount);

/**
 * @brief Sets the zero points of the {@link NN_QuantParam} instance.
 *
 * The parameter <b>quantCount</b> is the number of quantization parameters of a tensor, e.g. the quantCount is the
 * channel count if the tensor is per-channel quantized.\n
 *
 * @param quantParams Pointer to the {@link NN_QuantParam} instance.
 * @param zeroPoints An array of zero points for all quantization parameters of the tensor.
 * @param quantCount Number of quantization parameters of the tensor.
 * @return Execution result of the function.
 *         {@link OH_NN_SUCCESS} set zero points of quant parameters successfully.\n
 *         {@link OH_NN_INVALID_PARAMETER} fail to set zero points of quant parameters. The possible reason for failure
 *         is that the <b>quantParams</b> or <b>zeroPoints</b> is nullptr, or <b>quantCount</b> is 0.\n
 * @since 11
 * @version 1.0
 */
OH_NN_ReturnCode OH_NNQuantParam_SetZeroPoints(NN_QuantParam *quantParams, const int32_t *zeroPoints, size_t quantCount);

/**
 * @brief Sets the number of bits of the {@link NN_QuantParam} instance.
 *
 * The parameter <b>quantCount</b> is the number of quantization parameters of a tensor, e.g. the quantCount is the
 * channel count if the tensor is per-channel quantized.\n
 *
 * @param quantParams Pointer to the {@link NN_QuantParam} instance.
 * @param numBits An array of bit counts for all quantization parameters of the tensor.
 * @param quantCount Number of quantization parameters of the tensor.
 * @return Execution result of the function.
 *         {@link OH_NN_SUCCESS} set num bits of quant parameters successfully.\n
 *         {@link OH_NN_INVALID_PARAMETER} fail to set num bits of quant parameters. The possible reason for failure
 *         is that the <b>quantParams</b> or <b>numBits</b> is nullptr, or <b>quantCount</b> is 0.\n
 * @since 11
 * @version 1.0
 */
OH_NN_ReturnCode OH_NNQuantParam_SetNumBits(NN_QuantParam *quantParams, const uint32_t *numBits, size_t quantCount);

/**
 * @brief Releases a {@link NN_QuantParam} instance.
 *
 * The {@link NN_QuantParam} instance needs to be released to avoid memory leak after it is set to a {@link NN_TensorDesc}. \n
 *
 * If <b>quantParams</b> or <b>*quantParams</b> is a null pointer, this method only prints warning logs and does not
 * execute the release. \n
 *
 * @param quantParams Double pointer to the {@link NN_QuantParam} instance.
 * @return Execution result of the function.
 *         {@link OH_NN_SUCCESS} destroy quant parameters object successfully.\n
 *         {@link OH_NN_INVALID_PARAMETER} fail to destroy quant parameters object. The possible reason for failure
 *         is that the <b>quantParams</b> or <b>*quantParams</b> is nullptr.\n
 * @since 11
 * @version 1.0
 */
OH_NN_ReturnCode OH_NNQuantParam_Destroy(NN_QuantParam **quantParams);
136 
/**
 * @brief Creates a model instance of the {@link OH_NNModel} type and uses other APIs provided by OH_NNModel to construct the model instance.
 *
 * Before composition, call {@link OH_NNModel_Construct} to create a model instance. Based on the model topology,
 * call the {@link OH_NNModel_AddTensorToModel}, {@link OH_NNModel_AddOperation}, and {@link OH_NNModel_SetTensorData} methods
 * to fill in the data and operator nodes of the model, and then call {@link OH_NNModel_SpecifyInputsAndOutputs} to specify the inputs and outputs of the model.
 * After the model topology is constructed, call {@link OH_NNModel_Finish} to build the model. \n
 *
 * After a model instance is no longer used, you need to destroy it by calling {@link OH_NNModel_Destroy} to avoid memory leak. \n
 *
 * @return Pointer to a {@link OH_NNModel} instance, or NULL if it fails to create.
 * @since 9
 * @version 1.0
 */
OH_NNModel *OH_NNModel_Construct(void);
152 
/**
 * @brief Adds a tensor to the model instance.
 *
 * The data node and operator parameters in the Neural Network Runtime model are composed of tensors of the model.
 * This method is used to add tensors to a model instance based on the <b>tensorDesc</b> parameter with type of {@link NN_TensorDesc}.
 * {@link NN_TensorDesc} contains some attributes such as shape, format, data type and provides corresponding APIs to access them.
 * The order of adding tensors is specified by the indices recorded in the model. The {@link OH_NNModel_SetTensorData}, {@link OH_NNModel_AddOperation},
 * and {@link OH_NNModel_SpecifyInputsAndOutputs} methods specify tensors based on the indices. \n
 *
 * Neural Network Runtime supports inputs and outputs of the dynamic shape. When adding a data node with a dynamic shape,
 * you need to set the dimensions that support dynamic changes to <b>-1</b>.
 * For example, if the shape of a four-dimensional tensor is set to <b>[1, -1, 2, 2]</b>, the second dimension supports dynamic changes. \n
 *
 * @param model Pointer to the {@link OH_NNModel} instance.
 * @param tensorDesc Pointer to the {@link NN_TensorDesc} instance. The tensor descriptor specifies the attributes of the tensor added to the model instance.
 * @return Execution result of the function.
 *         {@link OH_NN_SUCCESS} add tensor to model successfully.\n
 *         {@link OH_NN_INVALID_PARAMETER} fail to add tensor to model. The possible reason for failure
 *         is that the <b>model</b> or <b>tensorDesc</b> is nullptr.\n
 *         {@link OH_NN_MEMORY_ERROR} fail to add tensor to model. The possible reason for failure
 *         is that the memory error occurred such as failure to create an object.\n
 * @since 11
 * @version 1.0
 */
OH_NN_ReturnCode OH_NNModel_AddTensorToModel(OH_NNModel *model, const NN_TensorDesc *tensorDesc);

/**
 * @brief Sets the tensor value.
 *
 * For tensors with constant values (such as model weights), you need to use this method to set their data.
 * The index of a tensor is determined by the order in which the tensor is added to the model.
 * For details about how to add a tensor, see {@link OH_NNModel_AddTensorToModel}. \n
 *
 * @param model Pointer to the {@link OH_NNModel} instance.
 * @param index Index of a tensor.
 * @param dataBuffer Pointer to real data.
 * @param length Length of the data buffer.
 * @return Execution result of the function.
 *         {@link OH_NN_SUCCESS} set tensor data successfully.\n
 *         {@link OH_NN_INVALID_PARAMETER} fail to set tensor data. The possible reason for failure is that the
 *         <b>model</b> or <b>dataBuffer</b> is nullptr, or <b>length</b> is 0, or <b>index</b> is out of range.\n
 *         {@link OH_NN_OPERATION_FORBIDDEN} fail to set tensor data. The possible reason for failure
 *         is that the model is invalid.\n
 * @since 9
 * @version 1.0
 */
OH_NN_ReturnCode OH_NNModel_SetTensorData(OH_NNModel *model, uint32_t index, const void *dataBuffer, size_t length);

/**
 * @brief Sets the quantization parameter of a tensor.
 *
 * @param model Pointer to the {@link OH_NNModel} instance.
 * @param index Index of a tensor.
 * @param quantParam Pointer to the quantization parameter instance.
 * @return Execution result of the function.
 *         {@link OH_NN_SUCCESS} set tensor quant parameters successfully.\n
 *         {@link OH_NN_INVALID_PARAMETER} fail to set tensor quant parameters. The possible reason for failure
 *         is that the <b>model</b> or <b>quantParam</b> is nullptr, or <b>index</b> is out of range.\n
 *         {@link OH_NN_OPERATION_FORBIDDEN} fail to set tensor quant parameters. The possible reason for failure
 *         is that the model is invalid.\n
 * @since 11
 * @version 1.0
 */
OH_NN_ReturnCode OH_NNModel_SetTensorQuantParams(OH_NNModel *model, uint32_t index, NN_QuantParam *quantParam);

/**
 * @brief Sets the tensor type. See {@link OH_NN_TensorType} for details.
 *
 * @param model Pointer to the {@link OH_NNModel} instance.
 * @param index Index of a tensor.
 * @param tensorType Tensor type of {@link OH_NN_TensorType}.
 * @return Execution result of the function.
 *         {@link OH_NN_SUCCESS} set tensor type successfully.\n
 *         {@link OH_NN_INVALID_PARAMETER} fail to set tensor type. The possible reason for failure
 *         is that the <b>model</b> is nullptr, or <b>index</b> is out of range, or <b>tensorType</b> is invalid.\n
 *         {@link OH_NN_OPERATION_FORBIDDEN} fail to set tensor type. The possible reason for failure
 *         is that the model is invalid.\n
 * @since 11
 * @version 1.0
 */
OH_NN_ReturnCode OH_NNModel_SetTensorType(OH_NNModel *model, uint32_t index, OH_NN_TensorType tensorType);
234 
/**
 * @brief Adds an operator to a model instance.
 *
 * This method is used to add an operator to a model instance. The operator type is specified by <b>op</b>, and
 * the operator parameters, inputs, and outputs are specified by <b>paramIndices</b>, <b>inputIndices</b>, and <b>outputIndices</b> respectively.
 * This method verifies the attributes of operator parameters and the number of input and output parameters.
 * These attributes must be correctly set when {@link OH_NNModel_AddTensorToModel} is called to add tensors.
 * For details about the expected parameters, input attributes, and output attributes of each operator, see {@link OH_NN_OperationType}. \n
 *
 * <b>paramIndices</b>, <b>inputIndices</b>, and <b>outputIndices</b> store the indices of tensors.
 * The indices are determined by the order in which tensors are added to the model.
 * For details about how to add a tensor, see {@link OH_NNModel_AddTensorToModel}. \n
 *
 * If unnecessary parameters are added when adding an operator, this method returns {@link OH_NN_INVALID_PARAMETER}.
 * If no operator parameter is set, the operator uses the default parameter value.
 * For details about the default values, see {@link OH_NN_OperationType}. \n
 *
 * @param model Pointer to the {@link OH_NNModel} instance.
 * @param op Specifies the type of an operator to be added. For details, see the enumerated values of {@link OH_NN_OperationType}.
 * @param paramIndices Pointer to the <b>OH_NN_UInt32Array</b> instance, which is used to set operator parameters.
 * @param inputIndices Pointer to the <b>OH_NN_UInt32Array</b> instance, which is used to set the operator input.
 * @param outputIndices Pointer to the <b>OH_NN_UInt32Array</b> instance, which is used to set the operator output.
 * @return Execution result of the function.
 *         {@link OH_NN_SUCCESS} add operation to model successfully.\n
 *         {@link OH_NN_INVALID_PARAMETER} fail to add operation to model. The possible reason for failure is that the
 *         <b>model</b>, <b>paramIndices</b>, <b>inputIndices</b> or <b>outputIndices</b> is nullptr, or parameters are
 *         invalid.\n
 *         {@link OH_NN_OPERATION_FORBIDDEN} fail to add operation to model. The possible reason for failure
 *         is that the model is invalid.\n
 * @since 9
 * @version 1.0
 */
OH_NN_ReturnCode OH_NNModel_AddOperation(OH_NNModel *model,
                                         OH_NN_OperationType op,
                                         const OH_NN_UInt32Array *paramIndices,
                                         const OH_NN_UInt32Array *inputIndices,
                                         const OH_NN_UInt32Array *outputIndices);

/**
 * @brief Specifies the inputs and outputs of a model.
 *
 * A tensor must be specified as the end-to-end inputs and outputs of a model instance. This type of tensor cannot be set
 * using {@link OH_NNModel_SetTensorData}. \n
 *
 * The index of a tensor is determined by the order in which the tensor is added to the model.
 * For details about how to add a tensor, see {@link OH_NNModel_AddTensorToModel}. \n
 *
 * Currently, the model inputs and outputs cannot be set asynchronously. \n
 *
 * @param model Pointer to the {@link OH_NNModel} instance.
 * @param inputIndices Pointer to the <b>OH_NN_UInt32Array</b> instance, which is used to set the operator input.
 * @param outputIndices Pointer to the <b>OH_NN_UInt32Array</b> instance, which is used to set the operator output.
 * @return Execution result of the function.
 *         {@link OH_NN_SUCCESS} specify inputs and outputs successfully.\n
 *         {@link OH_NN_INVALID_PARAMETER} fail to specify inputs and outputs. The possible reason for failure is that
 *         the <b>model</b>, <b>inputIndices</b> or <b>outputIndices</b> is nullptr, or parameters are invalid.\n
 *         {@link OH_NN_OPERATION_FORBIDDEN} fail to specify inputs and outputs. The possible reason for failure
 *         is that the model is invalid.\n
 * @since 9
 * @version 1.0
 */
OH_NN_ReturnCode OH_NNModel_SpecifyInputsAndOutputs(OH_NNModel *model,
                                                    const OH_NN_UInt32Array *inputIndices,
                                                    const OH_NN_UInt32Array *outputIndices);
299 
/**
 * @brief Completes model composition.
 *
 * After the model topology is set up, call this method to indicate that the composition is complete. After this method is called,
 * additional composition operations cannot be performed. If {@link OH_NNModel_AddTensorToModel}, {@link OH_NNModel_AddOperation},
 * {@link OH_NNModel_SetTensorData}, and {@link OH_NNModel_SpecifyInputsAndOutputs} are called,
 * {@link OH_NN_OPERATION_FORBIDDEN} is returned. \n
 *
 * Before calling {@link OH_NNModel_GetAvailableOperations} and {@link OH_NNCompilation_Construct},
 * you must call this method to complete composition. \n
 *
 * @param model Pointer to the {@link OH_NNModel} instance.
 * @return Execution result of the function.
 *         {@link OH_NN_SUCCESS} the composition is complete successfully.\n
 *         {@link OH_NN_INVALID_PARAMETER} composition failed. The possible reason for failure
 *         is that the <b>model</b> is nullptr, or parameters set before are invalid.\n
 *         {@link OH_NN_OPERATION_FORBIDDEN} composition failed. The possible reason for failure
 *         is that the model is invalid.\n
 *         {@link OH_NN_MEMORY_ERROR} composition failed. The possible reason for failure
 *         is that the memory error occurred such as failure to create an object.\n
 * @since 9
 * @version 1.0
 */
OH_NN_ReturnCode OH_NNModel_Finish(OH_NNModel *model);

/**
 * @brief Releases a model instance.
 *
 * This method needs to be called to release the model instance created by calling {@link OH_NNModel_Construct}. Otherwise, memory leak will occur. \n
 *
 * If <b>model</b> or <b>*model</b> is a null pointer, this method only prints warning logs and does not execute the release. \n
 *
 * @param model Double pointer to the {@link OH_NNModel} instance. After a model instance is destroyed, this method sets <b>*model</b> to a null pointer.
 * @since 9
 * @version 1.0
 */
void OH_NNModel_Destroy(OH_NNModel **model);
337 
/**
 * @brief Queries whether the device supports operators in the model. The support status is indicated by the Boolean value.
 *
 * Queries whether underlying device supports operators in a model instance. The device is specified by <b>deviceID</b>,
 * and the result is represented by the array pointed by <b>isSupported</b>. If the <i>i</i>th operator is supported,
 * the value of <b>(*isSupported)</b>[<i>i</i>] is <b>true</b>. Otherwise, the value is <b>false</b>. \n
 *
 * After this method is successfully executed, <b>(*isSupported)</b> points to the bool array that records the operator support status.
 * The operator quantity for the array length is the same as that for the model instance. The memory corresponding to this array is
 * managed by Neural Network Runtime and is automatically destroyed after the model instance is destroyed or this method is called again. \n
 *
 * @param model Pointer to the {@link OH_NNModel} instance.
 * @param deviceID Device ID to be queried, which can be obtained by using {@link OH_NNDevice_GetAllDevicesID}.
 * @param isSupported Pointer to the bool array. When this method is called, <b>(*isSupported)</b> must be a null pointer.
 *                    Otherwise, {@link OH_NN_INVALID_PARAMETER} is returned.
 * @param opCount Number of operators in a model instance, corresponding to the length of the <b>(*isSupported)</b> array.
 * @return Execution result of the function.
 *         {@link OH_NN_SUCCESS} get available operations successfully.\n
 *         {@link OH_NN_INVALID_PARAMETER} fail to get available operations. The possible reason for failure
 *         is that the <b>model</b>, <b>isSupported</b> or <b>opCount</b> is nullptr, or <b>*isSupported</b> is
 *         not nullptr.\n
 *         {@link OH_NN_OPERATION_FORBIDDEN} fail to get available operations. The possible reason for failure
 *         is that the model is invalid.\n
 *         {@link OH_NN_FAILED} fail to get available operations. The possible reason for failure
 *         is that the <b>deviceID</b> is invalid.\n
 * @since 9
 * @version 1.0
 */
OH_NN_ReturnCode OH_NNModel_GetAvailableOperations(OH_NNModel *model,
                                                   size_t deviceID,
                                                   const bool **isSupported,
                                                   uint32_t *opCount);
370 
/**
 * @brief Adds a tensor to a model instance.
 *
 * The data node and operator parameters in the Neural Network Runtime model are composed of tensors of the model.
 * This method is used to add tensors to a model instance based on the <b>tensor</b> parameter.
 * The sequence of adding tensors is specified by the index value recorded in the model.
 * The {@link OH_NNModel_SetTensorData}, {@link OH_NNModel_AddOperation},
 * and {@link OH_NNModel_SpecifyInputsAndOutputs} methods specify tensors based on the index value.\n
 *
 * Neural Network Runtime supports inputs and outputs of the dynamic shape. When adding a data node with a dynamic
 * shape, you need to set the dimensions that support dynamic changes in <b>tensor.dimensions</b> to <b>-1</b>.
 * For example, if <b>tensor.dimensions</b> of a four-dimensional tensor is set to <b>[1, -1, 2, 2]</b>,
 * the second dimension supports dynamic changes.\n
 *
 * @param model Pointer to the {@link OH_NNModel} instance.
 * @param tensor Pointer to the {@link OH_NN_Tensor} tensor. The tensor specifies the attributes of the tensor added to
 *               the model instance.
 * @return Execution result of the function.
 *         {@link OH_NN_SUCCESS} add tensor to model successfully.\n
 *         {@link OH_NN_INVALID_PARAMETER} fail to add tensor to model. The possible reason for failure
 *         is that the <b>model</b> or <b>tensor</b> is nullptr.\n
 *         {@link OH_NN_OPERATION_FORBIDDEN} fail to add tensor to model. The possible reason for failure
 *         is that the model is invalid.\n
 * @deprecated since 11
 * @useinstead {@link OH_NNModel_AddTensorToModel}
 * @since 9
 * @version 1.0
 */
OH_NN_ReturnCode OH_NNModel_AddTensor(OH_NNModel *model, const OH_NN_Tensor *tensor);
400 
/**
 * @brief Sets the single input data for a model.
 *
 * This method copies the data whose length is specified by <b>length</b> (in bytes) in <b>dataBuffer</b> to the shared
 * memory of the underlying device. <b>inputIndex</b> specifies the input to be set and <b>tensor</b> sets information
 * such as the input shape, type, and quantization parameters.\n
 *
 * Neural Network Runtime supports models with dynamical shape input. For fixed shape input and dynamic shape input
 * scenarios, this method uses different processing policies.\n
 *
 * - Fixed shape input: The attributes of <b>tensor</b> must be the same as those of the tensor added by calling
 *   {@link OH_NNModel_AddTensor} in the composition phase.
 * - Dynamic shape input: In the composition phase, because the shape is not fixed, each value in
 *   <b>tensor.dimensions</b> must be greater than <b>0</b> in the method calls to determine the shape input in the
 *   calculation phase. When setting the shape, you can modify only the dimension whose value is <b>-1</b>.
 *   Assume that <b>[-1, 224, 224, 3]</b> is input as the dimension of A in the composition phase.
 *   When this method is called, only the size of the first dimension can be modified, e.g. to <b>[3, 224, 224, 3]</b>.
 *   If other dimensions are adjusted, {@link OH_NN_INVALID_PARAMETER} is returned.\n
 *
 * @param executor Pointer to the {@link OH_NNExecutor} instance.
 * @param inputIndex Input index value, which is in the same sequence of the data input when
 *                   {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
 *                   Assume that the value of <b>inputIndices</b> is <b>{1, 5, 9}</b> when
 *                   {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
 *                   In input settings, the index value for the three inputs is <b>{0, 1, 2}</b>.\n
 * @param tensor Sets the tensor corresponding to the input data.
 * @param dataBuffer Pointer to the input data.
 * @param length Length of the data buffer, in bytes.
 * @return Execution result of the function.
 *         {@link OH_NN_SUCCESS} set model input successfully.\n
 *         {@link OH_NN_INVALID_PARAMETER} fail to set model input. The possible reason for failure
 *         is that the <b>executor</b>, <b>tensor</b> or <b>dataBuffer</b> is nullptr, or <b>inputIndex</b>
 *         is out of range, or <b>length</b> is 0.\n
 *         {@link OH_NN_MEMORY_ERROR} fail to set model input. The possible reason for failure
 *         is that the memory error occurred such as failure to create an object.\n
 * @deprecated since 11
 * @useinstead {@link OH_NNExecutor_RunSync}
 * @since 9
 * @version 1.0
 */
OH_NN_ReturnCode OH_NNExecutor_SetInput(OH_NNExecutor *executor,
                                        uint32_t inputIndex,
                                        const OH_NN_Tensor *tensor,
                                        const void *dataBuffer,
                                        size_t length);
446 
/**
 * @brief Sets the buffer for a single output of a model.
 *
 * This method binds the buffer to which <b>dataBuffer</b> points to the output specified by <b>outputIndex</b>.
 * The length of the buffer is specified by <b>length</b>.\n
 *
 * After {@link OH_NNExecutor_Run} is called to complete a single model inference, Neural Network Runtime compares
 * the length of the buffer to which <b>dataBuffer</b> points with the length of the output data and returns different
 * results based on the actual situation.\n
 *
 * - If the buffer length is greater than or equal to the data length, the inference result is copied to the buffer and
 *   {@link OH_NN_SUCCESS} is returned. You can read the inference result from <b>dataBuffer</b>.
 * - If the buffer length is smaller than the data length, {@link OH_NNExecutor_Run} returns
 *   {@link OH_NN_INVALID_PARAMETER} and generates a log indicating that the buffer is too small.\n
 *
 * @param executor Pointer to the {@link OH_NNExecutor} instance.
 * @param outputIndex Output Index value, which is in the same sequence of the data output when
 *                    {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
 *                    Assume that the value of <b>outputIndices</b> is <b>{4, 6, 8}</b> when
 *                    {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
 *                    In output buffer settings, the index value for the three outputs is <b>{0, 1, 2}</b>.
 * @param dataBuffer Pointer to the output data.
 * @param length Length of the data buffer, in bytes.
 * @return Execution result of the function.
 *         {@link OH_NN_SUCCESS} set model output successfully.\n
 *         {@link OH_NN_INVALID_PARAMETER} fail to set model output. The possible reason for failure
 *         is that the <b>executor</b> or <b>dataBuffer</b> is nullptr, or <b>outputIndex</b>
 *         is out of range, or <b>length</b> is 0.\n
 *         {@link OH_NN_MEMORY_ERROR} fail to set model output. The possible reason for failure
 *         is that the memory error occurred such as failure to create an object.\n
 * @deprecated since 11
 * @useinstead {@link OH_NNExecutor_RunSync}
 * @since 9
 * @version 1.0
 */
OH_NN_ReturnCode OH_NNExecutor_SetOutput(OH_NNExecutor *executor,
                                         uint32_t outputIndex,
                                         void *dataBuffer,
                                         size_t length);
486 
/**
 * @brief Performs inference.
 *
 * Performs end-to-end inference and computing of the model on the device associated with the executor.\n
 *
 * @param executor Pointer to the {@link OH_NNExecutor} instance.
 * @return Execution result of the function.
 *         {@link OH_NN_SUCCESS} run model successfully.\n
 *         {@link OH_NN_INVALID_PARAMETER} fail to run model. The possible reason for failure
 *         is that the <b>executor</b> is nullptr.\n
 *         {@link OH_NN_FAILED} fail to run model. The possible reason for failure
 *         is that the backend device failed to run model.\n
 * @deprecated since 11
 * @useinstead {@link OH_NNExecutor_RunSync}
 * @since 9
 * @version 1.0
 */
OH_NN_ReturnCode OH_NNExecutor_Run(OH_NNExecutor *executor);
505 
/**
 * @brief Allocates shared memory to a single input on a device.
 *
 * Neural Network Runtime provides a method for proactively allocating shared memory on a device.
 * By specifying the executor and input index value, this method allocates shared memory whose size is specified by
 * <b>length</b> on the device associated with a single input and returns the operation result through the
 * {@link OH_NN_Memory} instance.\n
 *
 * @param executor Pointer to the {@link OH_NNExecutor} instance.
 * @param inputIndex Input index value, which is in the same sequence of the data input when
 *                   {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
 *                   Assume that the value of <b>inputIndices</b> is <b>{1, 5, 9}</b> when
 *                   {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
 *                   In the memory input application, the index value for the three inputs is <b>{0, 1, 2}</b>.
 * @param length Memory size to be applied for, in bytes.
 * @return Pointer to a {@link OH_NN_Memory} instance, or NULL if it fails to create.
 * @deprecated since 11
 * @useinstead {@link OH_NNTensor_CreateWithSize}
 * @since 9
 * @version 1.0
 */
OH_NN_Memory *OH_NNExecutor_AllocateInputMemory(OH_NNExecutor *executor, uint32_t inputIndex, size_t length);

/**
 * @brief Allocates shared memory to a single output on a device.
 *
 * Neural Network Runtime provides a method for proactively allocating shared memory on a device.
 * By specifying the executor and output index value, this method allocates shared memory whose size is specified by
 * <b>length</b> on the device associated with a single output and returns the operation result through the
 * {@link OH_NN_Memory} instance.\n
 *
 * @param executor Pointer to the {@link OH_NNExecutor} instance.
 * @param outputIndex Output Index value, which is in the same sequence of the data output when
 *                    {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
 *                    Assume that the value of <b>outputIndices</b> is <b>{4, 6, 8}</b> when
 *                    {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
 *                    In output memory application, the index value for the three outputs is <b>{0, 1, 2}</b>.
 * @param length Memory size to be applied for, in bytes.
 * @return Pointer to a {@link OH_NN_Memory} instance, or NULL if it fails to create.
 * @deprecated since 11
 * @useinstead {@link OH_NNTensor_CreateWithSize}
 * @since 9
 * @version 1.0
 */
OH_NN_Memory *OH_NNExecutor_AllocateOutputMemory(OH_NNExecutor *executor, uint32_t outputIndex, size_t length);
551 
552 /**
553  * @brief Releases the input memory to which the {@link OH_NN_Memory} instance points.
554  *
555  * This method needs to be called to release the memory instance created by calling
556  * {@link OH_NNExecutor_AllocateInputMemory}. Otherwise, memory leak will occur.
557  * The mapping between <b>inputIndex</b> and <b>memory</b> must be the same as that in memory instance creation.\n
558  *
559  * If <b>memory</b> or <b>*memory</b> is a null pointer, this method only prints warning logs and does not execute
560  * the release logic.\n
561  *
562  * @param executor Pointer to the {@link OH_NNExecutor} instance.
563  * @param inputIndex Input index value, which is in the same sequence of the data input when
564  *                   {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
565  *                   Assume that the value of <b>inputIndices</b> is <b>{1, 5, 9}</b> when
566  *                   {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
567  *                   In memory input release, the index value for the three inputs is <b>{0, 1, 2}</b>.
568  * @param memory Double pointer to the {@link OH_NN_Memory} instance. After shared memory is destroyed,
569  *               this method sets <b>*memory</b> to a null pointer.
570  * @deprecated since 11
571  * @useinstead {@link OH_NNTensor_Destroy}
572  * @since 9
573  * @version 1.0
574  */
575 void OH_NNExecutor_DestroyInputMemory(OH_NNExecutor *executor, uint32_t inputIndex, OH_NN_Memory **memory);
576 
577 /**
578  * @brief Releases the output memory to which the {@link OH_NN_Memory} instance points.
579  *
580  * This method needs to be called to release the memory instance created by calling
581  * {@link OH_NNExecutor_AllocateOutputMemory}. Otherwise, memory leak will occur.
582  * The mapping between <b>outputIndex</b> and <b>memory</b> must be the same as that in memory instance creation.\n
583  *
584  * If <b>memory</b> or <b>*memory</b> is a null pointer, this method only prints warning logs and does not execute
585  * the release logic.\n
586  *
587  * @param executor Pointer to the {@link OH_NNExecutor} instance.
588  * @param outputIndex Output Index value, which is in the same sequence of the data output when
589  *                    {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
590  *                    Assume that the value of <b>outputIndices</b> is <b>{4, 6, 8}</b> when
591  *                    {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
592  *                    In output memory release, the index value for the three outputs is <b>{0, 1, 2}</b>.
593  * @param memory Double pointer to the {@link OH_NN_Memory} instance. After shared memory is destroyed,
594  *               this method sets <b>*memory</b> to a null pointer.
595  * @deprecated since 11
596  * @useinstead {@link OH_NNTensor_Destroy}
597  * @since 9
598  * @version 1.0
599  */
600 void OH_NNExecutor_DestroyOutputMemory(OH_NNExecutor *executor, uint32_t outputIndex, OH_NN_Memory **memory);
601 
602 /**
603  * @brief Specifies the hardware shared memory pointed to by the {@link OH_NN_Memory} instance as the shared memory
604  *        used by a single input.
605  *
606  * In scenarios where memory needs to be managed by yourself, this method binds the execution input to the
607  * {@link OH_NN_Memory} memory instance. During computing, the underlying device reads the input data from the shared
608  * memory pointed to by the memory instance. By using this method, concurrent execution of input setting, computing,
609  * and read can be implemented to improve inference efficiency of a data flow.\n
610  *
611  * @param executor Pointer to the {@link OH_NNExecutor} instance.
612  * @param inputIndex Input index value, which is in the same sequence of the data input when
613  *                   {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
614  *                   Assume that the value of <b>inputIndices</b> is <b>{1, 5, 9}</b> when
615  *                   {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
616  *                   When the input shared memory is specified, the index value for the three inputs is
617  *                   <b>{0, 1, 2}</b>.
618  * @param tensor Pointer to {@link OH_NN_Tensor}, used to set the tensor corresponding to a single input.
619  * @param memory Pointer to {@link OH_NN_Memory}.
620  * @return Execution result of the function.
621  *         {@link OH_NN_SUCCESS} set input with memory successfully.\n
622  *         {@link OH_NN_INVALID_PARAMETER} fail to set input with memory. The possible reason for failure
623  *         is that the <b>executor</b>, <b>tensor</b> or <b>memory</b> is nullptr, or <b>inputIndex</b> is out of range,
624  *         or memory length is less than tensor length.\n
625  *         {@link OH_NN_MEMORY_ERROR} fail to set input with memory. The possible reason for failure
626  *         is that the memory error occurred such as failure to create an object.\n
627  * @deprecated since 11
628  * @useinstead {@link OH_NNExecutor_RunSync}
629  * @since 9
630  * @version 1.0
631  */
632 OH_NN_ReturnCode OH_NNExecutor_SetInputWithMemory(OH_NNExecutor *executor,
633                                                   uint32_t inputIndex,
634                                                   const OH_NN_Tensor *tensor,
635                                                   const OH_NN_Memory *memory);
636 
637 /**
638  * @brief Specifies the hardware shared memory pointed to by the {@link OH_NN_Memory} instance as the shared memory
639  *        used by a single output.
640  *
641  * In scenarios where memory needs to be managed by yourself, this method binds the execution output to the
642  * {@link OH_NN_Memory} memory instance. When computing is performed, the underlying hardware directly writes the
643  * computing result to the shared memory to which the memory instance points. By using this method, concurrent execution
644  * of input setting, computing, and read can be implemented to improve inference efficiency of a data flow.\n
645  *
646  * @param executor Executor.
647  * @param outputIndex Output Index value, which is in the same sequence of the data output when
648  *                    {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
649  *                    Assume that the value of <b>outputIndices</b> is <b>{4, 6, 8}</b> when
650  *                    {@link OH_NNModel_SpecifyInputsAndOutputs} is called.
651  *                    When the output shared memory is specified, the index value for the three outputs is
652  *                    <b>{0, 1, 2}</b>.
653  * @param memory Pointer to {@link OH_NN_Memory}.
654  * @return Execution result of the function.
655  *         {@link OH_NN_SUCCESS} set output with memory successfully.\n
656  *         {@link OH_NN_INVALID_PARAMETER} fail to set output with memory. The possible reason for failure
657  *         is that the <b>executor</b>, <b>tensor</b> or <b>memory</b> is nullptr, or <b>outputIndex</b> is
658  *         out of range, or memory length is less than tensor length.\n
659  *         {@link OH_NN_MEMORY_ERROR} fail to set output with memory. The possible reason for failure
660  *         is that the memory error occurred such as failure to create an object.\n
661  * @deprecated since 11
662  * @useinstead {@link OH_NNExecutor_RunSync}
663  * @since 9
664  * @version 1.0
665  */
666 OH_NN_ReturnCode OH_NNExecutor_SetOutputWithMemory(OH_NNExecutor *executor,
667                                                    uint32_t outputIndex,
668                                                    const OH_NN_Memory *memory);
669 
670 #ifdef __cplusplus
671 }
672 #endif // __cplusplus
673 
674 /** @} */
675 #endif // NEURAL_NETWORK_RUNTIME_H
676