/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
/**
 * @addtogroup MindSpore
 * @{
 *
 * @brief Provides the model-inference related interfaces of MindSpore Lite.
 *
 * @Syscap SystemCapability.Ai.MindSpore
 * @since 9
 */
26 
/**
 * @file model.h
 *
 * @brief Provides model-related interfaces that can be used for model creation, model inference, and more.
 *
 * @library libmindspore_lite_ndk.so
 * @since 9
 */
35 #ifndef MINDSPORE_INCLUDE_C_API_MODEL_C_H
36 #define MINDSPORE_INCLUDE_C_API_MODEL_C_H
37 
38 #include "mindspore/tensor.h"
39 #include "mindspore/context.h"
40 #include "mindspore/status.h"
41 
42 #ifdef __cplusplus
43 extern "C" {
44 #endif
45 
/** Opaque handle to a model object; obtained from OH_AI_ModelCreate. */
typedef void *OH_AI_ModelHandle;

/** Opaque handle to a training-config object; obtained from OH_AI_TrainCfgCreate. Only valid for Lite Train. */
typedef void *OH_AI_TrainCfgHandle;

50 typedef struct OH_AI_TensorHandleArray {
51   size_t handle_num;
52   OH_AI_TensorHandle *handle_list;
53 } OH_AI_TensorHandleArray;
54 
/** Maximum number of dimensions a shape may describe. */
#define OH_AI_MAX_SHAPE_NUM 32

/** Dimension information of a tensor: the dimension count and each extent. */
typedef struct OH_AI_ShapeInfo {
  size_t shape_num;                    /**< Number of valid entries in shape; must not exceed OH_AI_MAX_SHAPE_NUM. */
  int64_t shape[OH_AI_MAX_SHAPE_NUM];  /**< Dimension extents. */
} OH_AI_ShapeInfo;

/** Identifies the kernel a callback is invoked for. */
typedef struct OH_AI_CallBackParam {
  char *node_name;  /**< Name of the node being executed. */
  char *node_type;  /**< Type of the node being executed. */
} OH_AI_CallBackParam;

66 typedef bool (*OH_AI_KernelCallBack)(const OH_AI_TensorHandleArray inputs, const OH_AI_TensorHandleArray outputs,
67                                      const OH_AI_CallBackParam kernel_Info);
68 
69 /**
70  * @brief Create a model object.
71  *
72  * @return Model object handle.
73  * @since 9
74  */
75 OH_AI_API OH_AI_ModelHandle OH_AI_ModelCreate(void);
76 
77 /**
78  * @brief Destroy the model object.
79  *
80  * @param model Model object handle address.
81  * @since 9
82  */
83 OH_AI_API void OH_AI_ModelDestroy(OH_AI_ModelHandle *model);
84 
85 /**
86  * @brief Build the model from model file buffer so that it can run on a device.
87  *
88  * @param model Model object handle.
89  * @param model_data Define the buffer read from a model file.
90  * @param data_size Define bytes number of model file buffer.
91  * @param model_type Define The type of model file.
92  * @param model_context Define the context used to store options during execution.
93  * @return OH_AI_Status.
94  * @since 9
95  */
96 OH_AI_API OH_AI_Status OH_AI_ModelBuild(OH_AI_ModelHandle model, const void *model_data, size_t data_size,
97                                         OH_AI_ModelType model_type, const OH_AI_ContextHandle model_context);
98 
99 /**
100  * @brief Load and build the model from model path so that it can run on a device.
101  *
102  * @param model Model object handle.
103  * @param model_path Define the model file path.
104  * @param model_type Define The type of model file.
105  * @param model_context Define the context used to store options during execution.
106  * @return OH_AI_Status.
107  * @since 9
108  */
109 OH_AI_API OH_AI_Status OH_AI_ModelBuildFromFile(OH_AI_ModelHandle model, const char *model_path,
110                                                 OH_AI_ModelType model_type, const OH_AI_ContextHandle model_context);
111 
112 /**
113  * @brief Resizes the shapes of inputs.
114  *
115  * @param model Model object handle.
116  * @param inputs The array that includes all input tensor handles.
117  * @param shape_infos Defines the new shapes of inputs, should be consistent with inputs.
118  * @param shape_info_num The num of shape_infos.
119  * @return OH_AI_Status.
120  * @since 9
121  */
122 OH_AI_API OH_AI_Status OH_AI_ModelResize(OH_AI_ModelHandle model, const OH_AI_TensorHandleArray inputs,
123                                          OH_AI_ShapeInfo *shape_infos, size_t shape_info_num);
124 
125 /**
126  * @brief Inference model.
127  *
128  * @param model Model object handle.
129  * @param inputs The array that includes all input tensor handles.
130  * @param outputs The array that includes all output tensor handles.
131  * @param before CallBack before predict.
132  * @param after CallBack after predict.
133  * @return OH_AI_Status.
134  * @since 9
135  */
136 OH_AI_API OH_AI_Status OH_AI_ModelPredict(OH_AI_ModelHandle model, const OH_AI_TensorHandleArray inputs,
137                                           OH_AI_TensorHandleArray *outputs, const OH_AI_KernelCallBack before,
138                                           const OH_AI_KernelCallBack after);
139 
140 /**
141  * @brief Obtains all input tensor handles of the model.
142  *
143  * @param model Model object handle.
144  * @return The array that includes all input tensor handles.
145  * @since 9
146  */
147 OH_AI_API OH_AI_TensorHandleArray OH_AI_ModelGetInputs(const OH_AI_ModelHandle model);
148 
149 /**
150  * @brief Obtains all output tensor handles of the model.
151  *
152  * @param model Model object handle.
153  * @return The array that includes all output tensor handles.
154  * @since 9
155  */
156 OH_AI_API OH_AI_TensorHandleArray OH_AI_ModelGetOutputs(const OH_AI_ModelHandle model);
157 
158 /**
159  * @brief Obtains the input tensor handle of the model by name.
160  *
161  * @param model Model object handle.
162  * @param tensor_name The name of tensor.
163  * @return The input tensor handle with the given name, if the name is not found, an NULL is returned.
164  * @since 9
165  */
166 OH_AI_API OH_AI_TensorHandle OH_AI_ModelGetInputByTensorName(const OH_AI_ModelHandle model, const char *tensor_name);
167 
168 /**
169  * @brief Obtains the output tensor handle of the model by name.
170  *
171  * @param model Model object handle.
172  * @param tensor_name The name of tensor.
173  * @return The output tensor handle with the given name, if the name is not found, an NULL is returned.
174  * @since 9
175  */
176 OH_AI_API OH_AI_TensorHandle OH_AI_ModelGetOutputByTensorName(const OH_AI_ModelHandle model, const char *tensor_name);
177 
178 /**
179  * @brief Create a TrainCfg object. Only valid for Lite Train.
180  *
181  * @return TrainCfg object handle.
182  * @since 11
183  */
184 OH_AI_API OH_AI_TrainCfgHandle OH_AI_TrainCfgCreate(void);
185 
186 /**
187  * @brief Destroy the train_cfg object. Only valid for Lite Train.
188  *
189  * @param train_cfg TrainCfg object handle.
190  * @since 11
191  */
192 OH_AI_API void OH_AI_TrainCfgDestroy(OH_AI_TrainCfgHandle *train_cfg);
193 
194 /**
195  * @brief Obtains part of the name that identify a loss kernel. Only valid for Lite Train.
196  *
197  * @param train_cfg TrainCfg object handle.
198  * @param num The num of loss_name.
199  * @return loss_name.
200  * @since 11
201  */
202 OH_AI_API char **OH_AI_TrainCfgGetLossName(OH_AI_TrainCfgHandle train_cfg, size_t *num);
203 
204 /**
205  * @brief Set part of the name that identify a loss kernel. Only valid for Lite Train.
206  *
207  * @param train_cfg TrainCfg object handle.
208  * @param loss_name Define part of the name that identify a loss kernel.
209  * @param num The num of loss_name.
210  * @since 11
211  */
212 OH_AI_API void OH_AI_TrainCfgSetLossName(OH_AI_TrainCfgHandle train_cfg, const char **loss_name, size_t num);
213 
214 /**
215  * @brief Obtains optimization level of the train_cfg. Only valid for Lite Train.
216  *
217  * @param train_cfg TrainCfg object handle.
218  * @return OH_AI_OptimizationLevel.
219  * @since 11
220  */
221 OH_AI_API OH_AI_OptimizationLevel OH_AI_TrainCfgGetOptimizationLevel(OH_AI_TrainCfgHandle train_cfg);
222 
223 /**
224  * @brief Set optimization level of the train_cfg. Only valid for Lite Train.
225  *
226  * @param train_cfg TrainCfg object handle.
227  * @param level The optimization level of train_cfg.
228  * @since 11
229  */
230 OH_AI_API void OH_AI_TrainCfgSetOptimizationLevel(OH_AI_TrainCfgHandle train_cfg, OH_AI_OptimizationLevel level);
231 
232 /**
233  * @brief Build the train model from model buffer so that it can run on a device. Only valid for Lite Train.
234  *
235  * @param model Model object handle.
236  * @param model_data Define the buffer read from a model file.
237  * @param data_size Define bytes number of model file buffer.
238  * @param model_type Define The type of model file.
239  * @param model_context Define the context used to store options during execution.
240  * @param train_cfg Define the config used by training.
241  * @return OH_AI_Status.
242  * @since 11
243  */
244 OH_AI_API OH_AI_Status OH_AI_TrainModelBuild(OH_AI_ModelHandle model, const void *model_data, size_t data_size,
245                                              OH_AI_ModelType model_type, const OH_AI_ContextHandle model_context,
246                                              const OH_AI_TrainCfgHandle train_cfg);
247 
248 /**
249  * @brief Build the train model from model file buffer so that it can run on a device. Only valid for Lite Train.
250  *
251  * @param model Model object handle.
252  * @param model_path Define the model path.
253  * @param model_type Define The type of model file.
254  * @param model_context Define the context used to store options during execution.
255  * @param train_cfg Define the config used by training.
256  * @return OH_AI_Status.
257  * @since 11
258  */
259 OH_AI_API OH_AI_Status OH_AI_TrainModelBuildFromFile(OH_AI_ModelHandle model, const char *model_path,
260                                                      OH_AI_ModelType model_type,
261                                                      const OH_AI_ContextHandle model_context,
262                                                      const OH_AI_TrainCfgHandle train_cfg);
263 
264 /**
265  * @brief Train model by step. Only valid for Lite Train.
266  *
267  * @param model Model object handle.
268  * @param before CallBack before predict.
269  * @param after CallBack after predict.
270  * @return OH_AI_Status.
271  * @since 11
272  */
273 OH_AI_API OH_AI_Status OH_AI_RunStep(OH_AI_ModelHandle model, const OH_AI_KernelCallBack before,
274                                      const OH_AI_KernelCallBack after);
275 
276 /**
277  * @brief Sets the Learning Rate of the training. Only valid for Lite Train.
278  *
279  * @param learning_rate to set.
280  * @return OH_AI_Status of operation.
281  * @since 11
282  */
283 OH_AI_API OH_AI_Status OH_AI_ModelSetLearningRate(OH_AI_ModelHandle model, float learning_rate);
284 
285 /**
286  * @brief Obtains the Learning Rate of the optimizer. Only valid for Lite Train.
287  *
288  * @param model Model object handle.
289  * @return Learning rate. 0.0 if no optimizer was found.
290  * @since 11
291  */
292 OH_AI_API float OH_AI_ModelGetLearningRate(OH_AI_ModelHandle model);
293 
294 /**
295  * @brief Obtains all weights tensors of the model. Only valid for Lite Train.
296  *
297  * @param model Model object handle.
298  * @return The vector that includes all gradient tensors.
299  * @since 11
300  */
301 OH_AI_API OH_AI_TensorHandleArray OH_AI_ModelGetWeights(OH_AI_ModelHandle model);
302 
303 /**
304  * @brief update weights tensors of the model. Only valid for Lite Train.
305  *
306  * @param new_weights A vector new weights.
307  * @return OH_AI_Status
308  * @since 11
309  */
310 OH_AI_API OH_AI_Status OH_AI_ModelUpdateWeights(OH_AI_ModelHandle model, const OH_AI_TensorHandleArray new_weights);
311 
312 /**
313  * @brief Get the model running mode.
314  *
315  * @param model Model object handle.
316  * @return Is Train Mode or not.
317  * @since 11
318  */
319 OH_AI_API bool OH_AI_ModelGetTrainMode(OH_AI_ModelHandle model);
320 
321 /**
322  * @brief Set the model running mode. Only valid for Lite Train.
323  *
324  * @param model Model object handle.
325  * @param train True means model runs in Train Mode, otherwise Eval Mode.
326  * @return OH_AI_Status.
327  * @since 11
328  */
329 OH_AI_API OH_AI_Status OH_AI_ModelSetTrainMode(OH_AI_ModelHandle model, bool train);
330 
331 /**
332  * @brief Setup training with virtual batches. Only valid for Lite Train.
333  *
334  * @param model Model object handle.
335  * @param virtual_batch_multiplier Virtual batch multiplier, use any number < 1 to disable.
336  * @param lr Learning rate to use for virtual batch, -1 for internal configuration.
337  * @param momentum Batch norm momentum to use for virtual batch, -1 for internal configuration.
338  * @return OH_AI_Status.
339  * @since 11
340  */
341 OH_AI_API OH_AI_Status OH_AI_ModelSetupVirtualBatch(OH_AI_ModelHandle model, int virtual_batch_multiplier, float lr,
342                                                     float momentum);
343 
344 /**
345  * @brief Export training model from file. Only valid for Lite Train.
346  *
347  * @param model The model data.
348  * @param model_type The model file type.
349  * @param model_file The exported model file.
350  * @param quantization_type The quantification type.
351  * @param export_inference_only Whether to export a reasoning only model.
352  * @param output_tensor_name The set the name of the output tensor of the exported reasoning model, default as
353  *        empty, and export the complete reasoning model.
354  * @param num The number of output_tensor_name.
355  * @return OH_AI_Status.
356  * @since 11
357  */
358 OH_AI_API OH_AI_Status OH_AI_ExportModel(OH_AI_ModelHandle model, OH_AI_ModelType model_type, const char *model_file,
359                                          OH_AI_QuantizationType quantization_type, bool export_inference_only,
360                                          char **output_tensor_name, size_t num);
361 
362 /**
363  * @brief Export training model from buffer. Only valid for Lite Train.
364  *
365  * @param model The model data.
366  * @param model_type The model file type.
367  * @param model_data The exported model buffer.
368  * @param data_size The exported model buffer size.
369  * @param quantization_type The quantification type.
370  * @param export_inference_only Whether to export a reasoning only model.
371  * @param output_tensor_name The set the name of the output tensor of the exported reasoning model, default as
372  *        empty, and export the complete reasoning model.
373  * @param num The number of output_tensor_name.
374  * @return OH_AI_Status.
375  * @since 11
376  */
377 OH_AI_API OH_AI_Status OH_AI_ExportModelBuffer(OH_AI_ModelHandle model, OH_AI_ModelType model_type, void *model_data,
378                                                size_t *data_size, OH_AI_QuantizationType quantization_type,
379                                                bool export_inference_only, char **output_tensor_name, size_t num);
380 
381 /**
382  * @brief Export model's weights, which can be used in micro only. Only valid for Lite Train.
383  *
384  * @param model The model data.
385  * @param model_type The model file type.
386  * @param weight_file The path of exported weight file.
387  * @param is_inference Whether to export weights from a reasoning model. Currently, only support this is `true`.
388  * @param enable_fp16 Float-weight is whether to be saved in float16 format.
389  * @param changeable_weights_name The set the name of these weight tensors, whose shape is changeable.
390  * @param num The number of changeable_weights_name.
391  * @return OH_AI_Status.
392  * @since 11
393  */
394 OH_AI_API OH_AI_Status OH_AI_ExportWeightsCollaborateWithMicro(OH_AI_ModelHandle model, OH_AI_ModelType model_type,
395                                                                const char *weight_file, bool is_inference,
396                                                                bool enable_fp16, char **changeable_weights_name,
397                                                                size_t num);
398 
399 #ifdef __cplusplus
400 }
401 #endif
402 #endif  // MINDSPORE_INCLUDE_C_API_MODEL_C_H
403