• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * Copyright 2021 Huawei Technologies Co., Ltd
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 #ifndef MINDSPORE_INCLUDE_C_API_MODEL_C_H
17 #define MINDSPORE_INCLUDE_C_API_MODEL_C_H
18 
19 #include "include/c_api/tensor_c.h"
20 #include "include/c_api/context_c.h"
21 #include "include/c_api/status_c.h"
22 
23 #ifdef __cplusplus
24 extern "C" {
25 #endif
26 
/// \brief Opaque handle to a model object. Created by OH_AI_ModelCreate and released by OH_AI_ModelDestroy.
typedef void *OH_AI_ModelHandle;

/// \brief Opaque handle to a training configuration object. Created by OH_AI_TrainCfgCreate and
/// released by OH_AI_TrainCfgDestroy. Only meaningful for Lite Train builds.
typedef void *OH_AI_TrainCfgHandle;
30 
/// \brief A counted array of tensor handles, used to pass all inputs or all outputs of a model at once.
typedef struct OH_AI_TensorHandleArray {
  size_t handle_num;                ///< Number of valid entries in handle_list.
  OH_AI_TensorHandle *handle_list;  ///< Pointer to handle_num tensor handles.
                                    ///< NOTE(review): ownership is presumably retained by the model
                                    ///< (callers must not free the list) — confirm with the implementation.
} OH_AI_TensorHandleArray;
35 
/// \brief Maximum number of dimensions an OH_AI_ShapeInfo can describe.
#define OH_AI_MAX_SHAPE_NUM 32
/// \brief Shape (dimension) information for one tensor, used e.g. by OH_AI_ModelResize.
typedef struct OH_AI_ShapeInfo {
  size_t shape_num;                    ///< Number of valid entries in shape; must not exceed OH_AI_MAX_SHAPE_NUM.
  int64_t shape[OH_AI_MAX_SHAPE_NUM];  ///< Dimension sizes; only the first shape_num entries are meaningful.
} OH_AI_ShapeInfo;
41 
/// \brief Identifies the kernel (graph node) currently executing; passed to OH_AI_KernelCallBack.
typedef struct OH_AI_CallBackParam {
  char *node_name;  ///< Name of the node being executed.
  char *node_type;  ///< Operator type of the node being executed.
} OH_AI_CallBackParam;
46 
47 typedef bool (*OH_AI_KernelCallBack)(const OH_AI_TensorHandleArray inputs, const OH_AI_TensorHandleArray outputs,
48                                   const OH_AI_CallBackParam kernel_Info);
49 
50 /// \brief Create a model object.
51 ///
52 /// \return Model object handle.
53 OH_AI_API OH_AI_ModelHandle OH_AI_ModelCreate();
54 
/// \brief Destroy the model object.
///
/// \param[in] model Address of a model object handle.
///            NOTE(review): presumably *model is reset to NULL after destruction — confirm with the
///            implementation; callers should not reuse the handle either way.
OH_AI_API void OH_AI_ModelDestroy(OH_AI_ModelHandle *model);

/// \brief Set workspace for the model object. Only valid for Iot.
///
/// \param[in] model Model object handle.
/// \param[in] workspace Define the workspace address.
/// \param[in] workspace_size Define the workspace size, in bytes.
OH_AI_API void OH_AI_ModelSetWorkspace(OH_AI_ModelHandle model, void *workspace, size_t workspace_size);

/// \brief Calculate the workspace size required for model inference. Only valid for Iot.
///
/// \param[in] model Model object handle.
///
/// \return The required workspace size in bytes.
OH_AI_API size_t OH_AI_ModelCalcWorkspaceSize(OH_AI_ModelHandle model);
71 
/// \brief Build the model from a model file buffer so that it can run on a device.
///
/// \param[in] model Model object handle.
/// \param[in] model_data Define the buffer read from a model file.
/// \param[in] data_size Define the byte count of the model file buffer.
/// \param[in] model_type Defines the type of the model file.
/// \param[in] model_context Define the context used to store options during execution.
///
/// \return OH_AI_Status.
OH_AI_API OH_AI_Status OH_AI_ModelBuild(OH_AI_ModelHandle model, const void *model_data, size_t data_size, OH_AI_ModelType model_type,
                                        const OH_AI_ContextHandle model_context);

/// \brief Load and build the model from a model path so that it can run on a device.
///
/// \param[in] model Model object handle.
/// \param[in] model_path Define the model file path.
/// \param[in] model_type Defines the type of the model file.
/// \param[in] model_context Define the context used to store options during execution.
///
/// \return OH_AI_Status.
OH_AI_API OH_AI_Status OH_AI_ModelBuildFromFile(OH_AI_ModelHandle model, const char *model_path, OH_AI_ModelType model_type,
                                                const OH_AI_ContextHandle model_context);
94 
/// \brief Resize the shapes of inputs.
///
/// \param[in] model Model object handle.
/// \param[in] inputs The array that includes all input tensor handles.
/// \param[in] shape_infos Defines the new shapes of inputs; must be consistent (same order and count) with inputs.
/// \param[in] shape_info_num The number of entries in shape_infos.
///
/// \return OH_AI_Status.
OH_AI_API OH_AI_Status OH_AI_ModelResize(OH_AI_ModelHandle model, const OH_AI_TensorHandleArray inputs, OH_AI_ShapeInfo *shape_infos,
                              size_t shape_info_num);

/// \brief Run model inference.
///
/// \param[in] model Model object handle.
/// \param[in] inputs The array that includes all input tensor handles.
/// \param[out] outputs The array that includes all output tensor handles.
/// \param[in] before Callback invoked before each kernel; may be NULL.
/// \param[in] after Callback invoked after each kernel; may be NULL.
///
/// \return OH_AI_Status.
OH_AI_API OH_AI_Status OH_AI_ModelPredict(OH_AI_ModelHandle model, const OH_AI_TensorHandleArray inputs, OH_AI_TensorHandleArray *outputs,
                                          const OH_AI_KernelCallBack before, const OH_AI_KernelCallBack after);

/// \brief Run the model by a single step. Only valid for Iot.
///
/// \param[in] before Callback invoked before RunStep; may be NULL.
/// \param[in] after Callback invoked after RunStep; may be NULL.
/// \param[in] model Model object handle.
///
/// \return OH_AI_Status.
OH_AI_API OH_AI_Status OH_AI_ModelRunStep(OH_AI_ModelHandle model, const OH_AI_KernelCallBack before, const OH_AI_KernelCallBack after);
126 
/// \brief Set the model running mode. Only valid for Iot.
///
/// NOTE(review): this function is declared a second time further down in this header (with a
/// non-const handle parameter and a doc block that says "Only valid for Lite Train"). The two
/// declarations are compatible in C — top-level const on a parameter is ignored in prototypes —
/// but the duplicated, conflicting documentation should be reconciled and one declaration removed.
///
/// \param[in] model Model object handle.
/// \param[in] train True means model runs in Train Mode, otherwise Eval Mode.
///
/// \return Status of operation.
OH_AI_API OH_AI_Status OH_AI_ModelSetTrainMode(const OH_AI_ModelHandle model, bool train);
134 
/// \brief Export the weights of the model to a binary file. Only valid for Iot.
///
/// \param[in] model Model object handle.
/// \param[in] export_path Define the export weight file path.
///
/// \return Status of operation.
OH_AI_API OH_AI_Status OH_AI_ModelExportWeight(const OH_AI_ModelHandle model, const char *export_path);

/// \brief Obtain all input tensor handles of the model.
///
/// \param[in] model Model object handle.
///
/// \return The array that includes all input tensor handles.
OH_AI_API OH_AI_TensorHandleArray OH_AI_ModelGetInputs(const OH_AI_ModelHandle model);

/// \brief Obtain all output tensor handles of the model.
///
/// \param[in] model Model object handle.
///
/// \return The array that includes all output tensor handles.
OH_AI_API OH_AI_TensorHandleArray OH_AI_ModelGetOutputs(const OH_AI_ModelHandle model);

/// \brief Obtain the input tensor handle of the model by name.
///
/// \param[in] model Model object handle.
/// \param[in] tensor_name The name of the tensor.
///
/// \return The input tensor handle with the given name; if the name is not found, NULL is returned.
OH_AI_API OH_AI_TensorHandle OH_AI_ModelGetInputByTensorName(const OH_AI_ModelHandle model, const char *tensor_name);

/// \brief Obtain the output tensor handle of the model by name.
///
/// \param[in] model Model object handle.
/// \param[in] tensor_name The name of the tensor.
///
/// \return The output tensor handle with the given name; if the name is not found, NULL is returned.
OH_AI_API OH_AI_TensorHandle OH_AI_ModelGetOutputByTensorName(const OH_AI_ModelHandle model, const char *tensor_name);
172 
173 /// \brief Create a TrainCfg object. Only valid for Lite Train.
174 ///
175 /// \return TrainCfg object handle.
176 OH_AI_API OH_AI_TrainCfgHandle OH_AI_TrainCfgCreate();
177 
/// \brief Destroy the train_cfg object. Only valid for Lite Train.
///
/// \param[in] train_cfg Address of a TrainCfg object handle.
OH_AI_API void OH_AI_TrainCfgDestroy(OH_AI_TrainCfgHandle *train_cfg);

/// \brief Obtains the name fragments that identify a loss kernel. Only valid for Lite Train.
///
/// \param[in] train_cfg TrainCfg object handle.
/// \param[out] num Receives the number of loss-name strings returned.
///
/// \return Array of loss-name strings.
///         NOTE(review): ownership of the returned array is not documented here — confirm whether the
///         caller must free it.
OH_AI_API char **OH_AI_TrainCfgGetLossName(OH_AI_TrainCfgHandle train_cfg, size_t *num);

/// \brief Set the name fragments that identify a loss kernel. Only valid for Lite Train.
///
/// \param[in] train_cfg TrainCfg object handle.
/// \param[in] loss_name Defines the name fragments that identify a loss kernel.
/// \param[in] num The number of entries in loss_name.
OH_AI_API void OH_AI_TrainCfgSetLossName(OH_AI_TrainCfgHandle train_cfg, const char **loss_name, size_t num);

/// \brief Obtains the optimization level of the train_cfg. Only valid for Lite Train.
///
/// \param[in] train_cfg TrainCfg object handle.
///
/// \return OH_AI_OptimizationLevel.
OH_AI_API OH_AI_OptimizationLevel OH_AI_TrainCfgGetOptimizationLevel(OH_AI_TrainCfgHandle train_cfg);

/// \brief Set the optimization level of the train_cfg. Only valid for Lite Train.
///
/// \param[in] train_cfg TrainCfg object handle.
/// \param[in] level The optimization level of train_cfg.
OH_AI_API void OH_AI_TrainCfgSetOptimizationLevel(OH_AI_TrainCfgHandle train_cfg, OH_AI_OptimizationLevel level);
210 
/// \brief Build the train model from a model buffer so that it can run on a device. Only valid for Lite Train.
///
/// \param[in] model Model object handle.
/// \param[in] model_data Define the buffer read from a model file.
/// \param[in] data_size Define the byte count of the model file buffer.
/// \param[in] model_type Defines the type of the model file.
/// \param[in] model_context Define the context used to store options during execution.
/// \param[in] train_cfg Define the config used by training.
///
/// \return OH_AI_Status.
OH_AI_API OH_AI_Status OH_AI_TrainModelBuild(OH_AI_ModelHandle model, const void *model_data, size_t data_size, OH_AI_ModelType model_type,
                                  const OH_AI_ContextHandle model_context, const OH_AI_TrainCfgHandle train_cfg);

/// \brief Build the train model from a model file path so that it can run on a device. Only valid for Lite Train.
///
/// \param[in] model Model object handle.
/// \param[in] model_path Define the model path.
/// \param[in] model_type Defines the type of the model file.
/// \param[in] model_context Define the context used to store options during execution.
/// \param[in] train_cfg Define the config used by training.
///
/// \return OH_AI_Status.
OH_AI_API OH_AI_Status OH_AI_TrainModelBuildFromFile(OH_AI_ModelHandle model, const char *model_path, OH_AI_ModelType model_type,
                                          const OH_AI_ContextHandle model_context, const OH_AI_TrainCfgHandle train_cfg);
235 
/// \brief Train the model by a single step. Only valid for Lite Train.
///
/// \param[in] model Model object handle.
/// \param[in] before Callback invoked before the step; may be NULL.
/// \param[in] after Callback invoked after the step; may be NULL.
///
/// \return OH_AI_Status.
OH_AI_API OH_AI_Status OH_AI_RunStep(OH_AI_ModelHandle model, const OH_AI_KernelCallBack before, const OH_AI_KernelCallBack after);

/// \brief Sets the learning rate of the training. Only valid for Lite Train.
///
/// \param[in] model Model object handle.
/// \param[in] learning_rate The learning rate to set.
///
/// \return OH_AI_Status of operation.
OH_AI_API OH_AI_Status OH_AI_ModelSetLearningRate(OH_AI_ModelHandle model, float learning_rate);

/// \brief Obtains the learning rate of the optimizer. Only valid for Lite Train.
///
/// \param[in] model Model object handle.
///
/// \return Learning rate. 0.0 if no optimizer was found.
OH_AI_API float OH_AI_ModelGetLearningRate(OH_AI_ModelHandle model);
256 
/// \brief Obtains all weight tensors of the model. Only valid for Lite Train.
///
/// \param[in] model Model object handle.
///
/// \return The array that includes all weight tensor handles.
OH_AI_API OH_AI_TensorHandleArray OH_AI_ModelGetWeights(OH_AI_ModelHandle model);

/// \brief Update the weight tensors of the model. Only valid for Lite Train.
///
/// \param[in] model Model object handle.
/// \param[in] new_weights An array of new weight tensors.
///
/// \return OH_AI_Status
OH_AI_API OH_AI_Status OH_AI_ModelUpdateWeights(OH_AI_ModelHandle model, const OH_AI_TensorHandleArray new_weights);

/// \brief Get the model running mode.
///
/// \param[in] model Model object handle.
///
/// \return True if the model is in Train Mode, otherwise false (Eval Mode).
OH_AI_API bool OH_AI_ModelGetTrainMode(OH_AI_ModelHandle model);
277 
/// \brief Set the model running mode. Only valid for Lite Train.
///
/// NOTE(review): duplicate declaration — OH_AI_ModelSetTrainMode is already declared earlier in this
/// header (with a const-qualified handle parameter and a doc block that says "Only valid for Iot").
/// The declarations are compatible in C (top-level parameter const is ignored in prototypes), so this
/// compiles, but the conflicting docs should be reconciled and one declaration removed.
///
/// \param[in] model Model object handle.
/// \param[in] train True means model runs in Train Mode, otherwise Eval Mode.
///
/// \return OH_AI_Status.
OH_AI_API OH_AI_Status OH_AI_ModelSetTrainMode(OH_AI_ModelHandle model, bool train);
285 
/// \brief Set up training with virtual batches. Only valid for Lite Train.
///
/// \param[in] model Model object handle.
/// \param[in] virtual_batch_multiplier Virtual batch multiplier; use any number < 1 to disable.
/// \param[in] lr Learning rate to use for the virtual batch; -1 for internal configuration.
/// \param[in] momentum Batch-norm momentum to use for the virtual batch; -1 for internal configuration.
///
/// \return OH_AI_Status.
OH_AI_API OH_AI_Status OH_AI_ModelSetupVirtualBatch(OH_AI_ModelHandle model, int virtual_batch_multiplier, float lr, float momentum);
295 
/// \brief Export the training model to a file. Only valid for Lite Train.
///
/// \param[in] model The model data.
/// \param[in] model_type The model file type.
/// \param[in] model_file The path of the exported model file.
/// \param[in] quantization_type The quantization type.
/// \param[in] export_inference_only Whether to export an inference-only model.
/// \param[in] output_tensor_name Sets the names of the output tensors of the exported inference model;
/// default is empty, which exports the complete inference model.
/// \param[in] num The number of entries in output_tensor_name.
///
/// \return OH_AI_Status.
OH_AI_API OH_AI_Status OH_AI_ExportModel(OH_AI_ModelHandle model, OH_AI_ModelType model_type, const char *model_file,
                              OH_AI_QuantizationType quantization_type, bool export_inference_only,
                              char **output_tensor_name, size_t num);

/// \brief Export the training model to a buffer. Only valid for Lite Train.
///
/// \param[in] model The model data.
/// \param[in] model_type The model file type.
/// \param[out] model_data Receives the exported model buffer.
/// \param[out] data_size Receives the exported model buffer size, in bytes.
/// \param[in] quantization_type The quantization type.
/// \param[in] export_inference_only Whether to export an inference-only model.
/// \param[in] output_tensor_name Sets the names of the output tensors of the exported inference model;
/// default is empty, which exports the complete inference model.
/// \param[in] num The number of entries in output_tensor_name.
///
/// \return OH_AI_Status.
OH_AI_API OH_AI_Status OH_AI_ExportModelBuffer(OH_AI_ModelHandle model, OH_AI_ModelType model_type, char **model_data, size_t *data_size,
                                    OH_AI_QuantizationType quantization_type, bool export_inference_only,
                                    char **output_tensor_name, size_t num);
328 
/// \brief Export the model's weights, for use with micro only. Only valid for Lite Train.
///
/// \param[in] model The model data.
/// \param[in] model_type The model file type.
/// \param[in] weight_file The path of the exported weight file.
/// \param[in] is_inference Whether to export weights from an inference model. Currently, only `true` is supported.
/// \param[in] enable_fp16 Whether float weights are saved in float16 format.
/// \param[in] changeable_weights_name Sets the names of the weight tensors whose shape is changeable.
/// \param[in] num The number of entries in changeable_weights_name.
///
/// \return OH_AI_Status.
OH_AI_API OH_AI_Status OH_AI_ExportWeightsCollaborateWithMicro(OH_AI_ModelHandle model, OH_AI_ModelType model_type,
                                                    const char *weight_file, bool is_inference, bool enable_fp16,
                                                    char **changeable_weights_name, size_t num);
343 
344 #ifdef __cplusplus
345 }
346 #endif
347 #endif  // MINDSPORE_INCLUDE_C_API_MODEL_C_H
348