1 /**
2 * Copyright 2021 Huawei Technologies Co., Ltd
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
#include "include/c_api/model_c.h"
#include <algorithm>
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "include/api/context.h"
#include "include/api/types.h"
#include "src/cxx_api/tensor/tensor_impl.h"
#include "src/cxx_api/converters.h"
#include "src/lite_session.h"
24
25 namespace mindspore {
26 class ModelC {
27 public:
ModelC()28 ModelC() : session_(nullptr), context_(nullptr) {}
~ModelC()29 ~ModelC() {
30 for (auto &impl : tensor_map_) {
31 delete impl.second;
32 }
33 }
34
35 Status Build(const void *model_data, size_t data_size, ModelType model_type, const ContextC *model_context);
36 Status Build(const std::string &model_path, ModelType model_type, const ContextC *model_context);
37 Status Resize(const std::vector<MSTensor::Impl *> &inputs, const std::vector<std::vector<int64_t>> &shapes);
38
39 Status Predict(const OH_AI_TensorHandle *inputs, size_t input_num, OH_AI_TensorHandle **outputs, size_t *output_num,
40 const OH_AI_KernelCallBack &before, const OH_AI_KernelCallBack &after);
41
42 MSTensor::Impl **GetInputs(size_t *input_num);
43 MSTensor::Impl **GetOutputs(size_t *output_num);
44
45 private:
46 std::shared_ptr<lite::LiteSession> session_ = nullptr;
47 std::shared_ptr<const ContextC> context_ = nullptr;
48 std::map<mindspore::tensor::MSTensor *, MSTensor::Impl *> tensor_map_;
49 std::vector<MSTensor::Impl *> inputs_;
50 std::vector<MSTensor::Impl *> outputs_;
51 Status RunGraph(const OH_AI_KernelCallBack &before, const OH_AI_KernelCallBack &after);
52 void ResetTensorData(std::vector<void *> old_data, std::vector<tensor::MSTensor *> tensors);
53 MSTensor::Impl *TensorToTensorImpl(mindspore::tensor::MSTensor *tensor);
54 };
55
Build(const void * model_data,size_t data_size,ModelType model_type,const ContextC * model_context)56 Status ModelC::Build(const void *model_data, size_t data_size, ModelType model_type, const ContextC *model_context) {
57 if(context_.get() != model_context){
58 context_.reset(model_context);
59 }
60 session_ = std::make_shared<lite::LiteSession>();
61 if (session_ == nullptr) {
62 MS_LOG(ERROR) << "create session failed";
63 return kLiteNullptr;
64 }
65 auto ret = session_->Init(ContextUtils::Convert(model_context));
66 if (ret != mindspore::lite::RET_OK) {
67 MS_LOG(ERROR) << "init session failed";
68 return static_cast<StatusCode>(ret);
69 }
70 ret = session_->LoadModelAndCompileByBuf(static_cast<const char *>(model_data), data_size);
71 if (ret != RET_OK) {
72 MS_LOG(ERROR) << "Load and compile failed";
73 }
74 return static_cast<StatusCode>(ret);
75 }
76
Build(const std::string & model_path,ModelType model_type,const ContextC * model_context)77 Status ModelC::Build(const std::string &model_path, ModelType model_type, const ContextC *model_context) {
78 if(context_.get() != model_context){
79 context_.reset(model_context);
80 }
81 session_ = std::make_shared<lite::LiteSession>();
82 if (session_ == nullptr) {
83 MS_LOG(ERROR) << "create session failed";
84 return kLiteNullptr;
85 }
86 auto ret = session_->Init(ContextUtils::Convert(model_context));
87 if (ret != mindspore::lite::RET_OK) {
88 MS_LOG(ERROR) << "init session failed";
89 return static_cast<StatusCode>(ret);
90 }
91 ret = session_->LoadModelAndCompileByPath(model_path);
92 if (ret != RET_OK) {
93 MS_LOG(ERROR) << "Load and compile failed";
94 }
95 return static_cast<StatusCode>(ret);
96 }
97
Resize(const std::vector<MSTensor::Impl * > & inputs,const std::vector<std::vector<int64_t>> & shapes)98 Status ModelC::Resize(const std::vector<MSTensor::Impl *> &inputs, const std::vector<std::vector<int64_t>> &shapes) {
99 std::vector<tensor::MSTensor *> inner_input;
100 size_t input_num = inputs.size();
101 for (size_t i = 0; i < input_num; i++) {
102 auto input = inputs[i];
103 if (input == nullptr || input->lite_tensor() == nullptr) {
104 MS_LOG(ERROR) << "Input tensor is null.";
105 return kLiteInputTensorError;
106 }
107 inner_input.push_back(input->lite_tensor());
108 }
109 size_t shape_num = shapes.size();
110 std::vector<std::vector<int32_t>> inner_shapes(shape_num);
111 for (size_t i = 0; i < shape_num; i++) {
112 std::transform(shapes[i].begin(), shapes[i].end(), std::back_inserter(inner_shapes[i]),
113 [](int64_t value) { return static_cast<int32_t>(value); });
114 }
115 if (session_ == nullptr) {
116 MS_LOG(ERROR) << "Session implement is null.";
117 return kLiteNullptr;
118 }
119 auto ret = session_->Resize(inner_input, inner_shapes);
120 return static_cast<StatusCode>(ret);
121 }
122
ResetTensorData(std::vector<void * > old_data,std::vector<tensor::MSTensor * > tensors)123 void ModelC::ResetTensorData(std::vector<void *> old_data, std::vector<tensor::MSTensor *> tensors) {
124 for (size_t j = 0; j < old_data.size(); j++) {
125 tensors.at(j)->set_data(old_data.at(j));
126 }
127 }
128
Predict(const OH_AI_TensorHandle * inputs,size_t input_num,OH_AI_TensorHandle ** outputs,size_t * output_num,const OH_AI_KernelCallBack & before,const OH_AI_KernelCallBack & after)129 Status ModelC::Predict(const OH_AI_TensorHandle *inputs, size_t input_num, OH_AI_TensorHandle **outputs,
130 size_t *output_num, const OH_AI_KernelCallBack &before, const OH_AI_KernelCallBack &after) {
131 if (outputs == nullptr || session_ == nullptr) {
132 MS_LOG(ERROR) << "param is nullptr.";
133 return kLiteError;
134 }
135 auto model_inputs = session_->GetInputs();
136 if (model_inputs.size() != input_num) {
137 MS_LOG(ERROR) << "Wrong input size.";
138 return kLiteError;
139 }
140 std::vector<void *> old_data;
141 for (size_t i = 0; i < input_num; i++) {
142 auto real_input = model_inputs[i];
143 auto user_input = static_cast<mindspore::MSTensor::Impl *>(inputs[i]);
144 if (user_input->DataType() != static_cast<DataType>(real_input->data_type())) {
145 ResetTensorData(old_data, model_inputs);
146 MS_LOG(ERROR) << "DataType does not match, input:" << user_input->Name()
147 << ", real:" << real_input->tensor_name();
148 return kLiteInputTensorError;
149 }
150 if (user_input->Data() == nullptr) {
151 ResetTensorData(old_data, model_inputs);
152 MS_LOG(ERROR) << "Tensor " << user_input->Name() << " has no data.";
153 return kLiteInputTensorError;
154 }
155 old_data.push_back(real_input->data());
156 if (real_input->data_type() == kObjectTypeString) {
157 std::vector<int32_t> shape;
158 std::transform(user_input->Shape().begin(), user_input->Shape().end(), std::back_inserter(shape),
159 [](int64_t value) { return static_cast<int32_t>(value); });
160 real_input->set_shape(shape);
161 real_input->set_data(user_input->MutableData());
162 } else {
163 if (user_input->MutableData() != real_input->data()) {
164 if (real_input->Size() != user_input->DataSize()) {
165 ResetTensorData(old_data, model_inputs);
166 MS_LOG(ERROR) << "Tensor " << user_input->Name() << " has wrong data size.";
167 return kLiteInputTensorError;
168 }
169 real_input->set_data(user_input->MutableData());
170 }
171 }
172 }
173 auto ret = RunGraph(before, after);
174 ResetTensorData(old_data, model_inputs);
175 if (ret != kSuccess) {
176 MS_LOG(ERROR) << "Run graph failed.";
177 return ret;
178 }
179
180 *outputs = reinterpret_cast<OH_AI_TensorHandle *>(GetOutputs(output_num));
181 return kSuccess;
182 }
183
RunGraph(const OH_AI_KernelCallBack & before,const OH_AI_KernelCallBack & after)184 Status ModelC::RunGraph(const OH_AI_KernelCallBack &before, const OH_AI_KernelCallBack &after) {
185 if (before == nullptr || after == nullptr) {
186 auto ret = session_->RunGraph();
187 return static_cast<StatusCode>(ret);
188 }
189 auto before_call_back = [&](const std::vector<mindspore::tensor::MSTensor *> &before_inputs,
190 const std::vector<mindspore::tensor::MSTensor *> &before_outputs,
191 const CallBackParam &call_param) {
192 std::vector<mindspore::MSTensor::Impl> inputs_impl;
193 std::vector<mindspore::MSTensor::Impl> outputs_impl;
194 std::vector<OH_AI_TensorHandle> op_inputs;
195 std::vector<OH_AI_TensorHandle> op_outputs;
196 size_t op_input_num = before_inputs.size();
197 for (size_t i = 0; i < op_input_num; i++) {
198 inputs_impl.emplace_back(before_inputs[i]);
199 op_inputs.push_back(&(inputs_impl.back()));
200 }
201 size_t op_output_num = before_outputs.size();
202 for (size_t i = 0; i < op_output_num; i++) {
203 outputs_impl.emplace_back(before_outputs[i]);
204 op_outputs.push_back(&(outputs_impl.back()));
205 }
206 const OH_AI_CallBackParam op_info = {const_cast<char *>(call_param.node_name.c_str()),
207 const_cast<char *>(call_param.node_type.c_str())};
208 OH_AI_TensorHandleArray inputs = {op_input_num, op_inputs.data()};
209 OH_AI_TensorHandleArray outputs = {op_output_num, op_outputs.data()};
210 return before(inputs, outputs, op_info);
211 };
212
213 auto after_call_back = [&](const std::vector<mindspore::tensor::MSTensor *> &after_inputs,
214 const std::vector<mindspore::tensor::MSTensor *> &after_outputs,
215 const CallBackParam &call_param) {
216 std::vector<mindspore::MSTensor::Impl> inputs_impl;
217 std::vector<mindspore::MSTensor::Impl> outputs_impl;
218 std::vector<OH_AI_TensorHandle> op_inputs;
219 std::vector<OH_AI_TensorHandle> op_outputs;
220 size_t op_input_num = after_inputs.size();
221 for (size_t i = 0; i < op_input_num; i++) {
222 inputs_impl.emplace_back(after_inputs[i]);
223 op_inputs.push_back(&(inputs_impl.back()));
224 }
225 size_t op_output_num = after_outputs.size();
226 for (size_t i = 0; i < op_output_num; i++) {
227 outputs_impl.emplace_back(after_outputs[i]);
228 op_outputs.push_back(&(outputs_impl.back()));
229 }
230 const OH_AI_CallBackParam op_info = {const_cast<char *>(call_param.node_name.c_str()),
231 const_cast<char *>(call_param.node_type.c_str())};
232 OH_AI_TensorHandleArray inputs = {op_input_num, op_inputs.data()};
233 OH_AI_TensorHandleArray outputs = {op_output_num, op_outputs.data()};
234 return after(inputs, outputs, op_info);
235 };
236 auto ret = session_->RunGraph(before_call_back, after_call_back);
237 return static_cast<StatusCode>(ret);
238 }
239
TensorToTensorImpl(mindspore::tensor::MSTensor * tensor)240 MSTensor::Impl *ModelC::TensorToTensorImpl(mindspore::tensor::MSTensor *tensor) {
241 MSTensor::Impl *impl = nullptr;
242 auto iter = tensor_map_.find(tensor);
243 if (iter != tensor_map_.end()) {
244 impl = iter->second;
245 } else {
246 impl = new (std::nothrow) MSTensor::Impl(tensor);
247 if (impl == nullptr || impl->lite_tensor() == nullptr) {
248 MS_LOG(ERROR) << "Create tensor failed.";
249 return nullptr;
250 }
251 tensor_map_[tensor] = impl;
252 }
253 return impl;
254 }
255
GetInputs(size_t * input_num)256 MSTensor::Impl **ModelC::GetInputs(size_t *input_num) {
257 if (session_ == nullptr || input_num == nullptr) {
258 MS_LOG(ERROR) << "Session is null.";
259 return nullptr;
260 }
261 auto inputs = session_->GetInputs();
262 *input_num = inputs.size();
263 if (inputs_.capacity() < *input_num) {
264 inputs_.reserve(*input_num);
265 }
266 inputs_.clear();
267 std::transform(inputs.begin(), inputs.end(), std::back_inserter(inputs_),
268 [&](tensor::MSTensor *input) { return TensorToTensorImpl(input); });
269 return inputs_.data();
270 }
271
GetOutputs(size_t * output_num)272 MSTensor::Impl **ModelC::GetOutputs(size_t *output_num) {
273 if (session_ == nullptr || output_num == nullptr) {
274 MS_LOG(ERROR) << "Session is null.";
275 return nullptr;
276 }
277 auto outputs = session_->GetOutputs();
278 *output_num = outputs.size();
279 if (outputs_.capacity() < *output_num) {
280 outputs_.reserve(*output_num);
281 }
282 outputs_.clear();
283 std::transform(outputs.begin(), outputs.end(), std::back_inserter(outputs_),
284 [&](std::unordered_map<std::string, mindspore::tensor::MSTensor *>::value_type iter) {
285 return TensorToTensorImpl(iter.second);
286 });
287 return outputs_.data();
288 }
289 } // namespace mindspore
290
OH_AI_ModelCreate()291 OH_AI_ModelHandle OH_AI_ModelCreate() {
292 auto impl = new (std::nothrow) mindspore::ModelC();
293 if (impl == nullptr) {
294 MS_LOG(ERROR) << "Model implement is null.";
295 return nullptr;
296 }
297 return static_cast<OH_AI_ModelHandle>(impl);
298 }
299
OH_AI_ModelDestroy(OH_AI_ModelHandle * model)300 void OH_AI_ModelDestroy(OH_AI_ModelHandle *model) {
301 if (model != nullptr && *model != nullptr) {
302 auto impl = static_cast<mindspore::ModelC *>(*model);
303 delete impl;
304 *model = nullptr;
305 }
306 }
307
OH_AI_ModelSetWorkspace(OH_AI_ModelHandle model,void * workspace,size_t workspace_size)308 void OH_AI_ModelSetWorkspace(OH_AI_ModelHandle model, void *workspace, size_t workspace_size) {
309 MS_LOG(ERROR) << "Unsupported Feature.";
310 return;
311 }
312
OH_AI_ModelBuild(OH_AI_ModelHandle model,const void * model_data,size_t data_size,OH_AI_ModelType model_type,const OH_AI_ContextHandle model_context)313 OH_AI_Status OH_AI_ModelBuild(OH_AI_ModelHandle model, const void *model_data, size_t data_size,
314 OH_AI_ModelType model_type, const OH_AI_ContextHandle model_context) {
315 if (model == nullptr || model_data == nullptr || model_context == nullptr) {
316 MS_LOG(ERROR) << "param is nullptr.";
317 return OH_AI_STATUS_LITE_NULLPTR;
318 }
319 if (model_type == OH_AI_MODELTYPE_INVALID) {
320 MS_LOG(ERROR) << "param is invalid.";
321 return OH_AI_STATUS_LITE_PARAM_INVALID;
322 }
323 mindspore::ContextC *context = static_cast<mindspore::ContextC *>(model_context);
324 auto impl = static_cast<mindspore::ModelC *>(model);
325 auto ret = impl->Build(model_data, data_size, static_cast<mindspore::ModelType>(model_type), context);
326 return static_cast<OH_AI_Status>(ret.StatusCode());
327 }
328
OH_AI_ModelBuildFromFile(OH_AI_ModelHandle model,const char * model_path,OH_AI_ModelType model_type,const OH_AI_ContextHandle model_context)329 OH_AI_Status OH_AI_ModelBuildFromFile(OH_AI_ModelHandle model, const char *model_path, OH_AI_ModelType model_type,
330 const OH_AI_ContextHandle model_context) {
331 if (model == nullptr || model_path == nullptr || model_context == nullptr) {
332 MS_LOG(ERROR) << "param is nullptr.";
333 return OH_AI_STATUS_LITE_NULLPTR;
334 }
335 if (model_type == OH_AI_MODELTYPE_INVALID) {
336 MS_LOG(ERROR) << "param is invalid.";
337 return OH_AI_STATUS_LITE_PARAM_INVALID;
338 }
339 mindspore::ContextC *context = static_cast<mindspore::ContextC *>(model_context);
340 auto impl = static_cast<mindspore::ModelC *>(model);
341 auto ret = impl->Build(model_path, static_cast<mindspore::ModelType>(model_type), context);
342 return static_cast<OH_AI_Status>(ret.StatusCode());
343 }
344
OH_AI_ModelResize(OH_AI_ModelHandle model,const OH_AI_TensorHandleArray inputs,OH_AI_ShapeInfo * shape_infos,size_t shape_info_num)345 OH_AI_Status OH_AI_ModelResize(OH_AI_ModelHandle model, const OH_AI_TensorHandleArray inputs,
346 OH_AI_ShapeInfo *shape_infos, size_t shape_info_num) {
347 if (model == nullptr || shape_infos == nullptr) {
348 MS_LOG(ERROR) << "param is nullptr.";
349 return OH_AI_STATUS_LITE_NULLPTR;
350 }
351 std::vector<mindspore::MSTensor::Impl *> vec_inputs;
352 std::transform(inputs.handle_list, inputs.handle_list + inputs.handle_num, std::back_inserter(vec_inputs),
353 [](OH_AI_TensorHandle value) { return static_cast<mindspore::MSTensor::Impl *>(value); });
354 std::vector<std::vector<int64_t>> vec_dims;
355 for (size_t i = 0; i < shape_info_num; i++) {
356 std::vector<int64_t> shape(shape_infos[i].shape, shape_infos[i].shape + shape_infos[i].shape_num);
357 vec_dims.push_back(shape);
358 }
359 auto impl = static_cast<mindspore::ModelC *>(model);
360 auto ret = impl->Resize(vec_inputs, vec_dims);
361 return static_cast<OH_AI_Status>(ret.StatusCode());
362 }
363
OH_AI_ModelPredict(OH_AI_ModelHandle model,const OH_AI_TensorHandleArray inputs,OH_AI_TensorHandleArray * outputs,const OH_AI_KernelCallBack before,const OH_AI_KernelCallBack after)364 OH_AI_Status OH_AI_ModelPredict(OH_AI_ModelHandle model, const OH_AI_TensorHandleArray inputs,
365 OH_AI_TensorHandleArray *outputs, const OH_AI_KernelCallBack before,
366 const OH_AI_KernelCallBack after) {
367 if (model == nullptr) {
368 MS_LOG(ERROR) << "param is nullptr.";
369 return OH_AI_STATUS_LITE_NULLPTR;
370 }
371 auto impl = static_cast<mindspore::ModelC *>(model);
372 auto ret = impl->Predict(inputs.handle_list, inputs.handle_num, &(outputs->handle_list), &(outputs->handle_num),
373 before, after);
374 if (!ret.IsOk()) {
375 MS_LOG(ERROR) << "Predict fail, ret :" << ret;
376 }
377 return static_cast<OH_AI_Status>(ret.StatusCode());
378 }
379
OH_AI_ModelGetInputs(const OH_AI_ModelHandle model)380 OH_AI_TensorHandleArray OH_AI_ModelGetInputs(const OH_AI_ModelHandle model) {
381 if (model == nullptr) {
382 MS_LOG(ERROR) << "param is nullptr.";
383 return {0, nullptr};
384 }
385 auto impl = static_cast<mindspore::ModelC *>(model);
386 size_t input_num;
387 auto handles = reinterpret_cast<OH_AI_TensorHandle *>(impl->GetInputs(&input_num));
388 return {input_num, handles};
389 }
390
OH_AI_ModelGetOutputs(const OH_AI_ModelHandle model)391 OH_AI_TensorHandleArray OH_AI_ModelGetOutputs(const OH_AI_ModelHandle model) {
392 if (model == nullptr) {
393 MS_LOG(ERROR) << "param is nullptr.";
394 return {0, nullptr};
395 }
396 auto impl = static_cast<mindspore::ModelC *>(model);
397 size_t output_num;
398 auto handles = reinterpret_cast<OH_AI_TensorHandle *>(impl->GetOutputs(&output_num));
399 return {output_num, handles};
400 }
401
OH_AI_ModelGetInputByTensorName(const OH_AI_ModelHandle model,const char * tensor_name)402 OH_AI_TensorHandle OH_AI_ModelGetInputByTensorName(const OH_AI_ModelHandle model, const char *tensor_name) {
403 if (model == nullptr || tensor_name == nullptr) {
404 MS_LOG(ERROR) << "param is nullptr.";
405 return nullptr;
406 }
407 auto impl = static_cast<mindspore::ModelC *>(model);
408 size_t input_num;
409 auto inputs = impl->GetInputs(&input_num);
410 for (size_t i = 0; i < input_num; i++) {
411 if (inputs[i]->Name() == tensor_name) {
412 return static_cast<OH_AI_TensorHandle>(inputs[i]);
413 }
414 }
415 MS_LOG(ERROR) << "tensor is not exist.";
416 return nullptr;
417 }
418
OH_AI_ModelGetOutputByTensorName(const OH_AI_ModelHandle model,const char * tensor_name)419 OH_AI_TensorHandle OH_AI_ModelGetOutputByTensorName(const OH_AI_ModelHandle model, const char *tensor_name) {
420 if (model == nullptr || tensor_name == nullptr) {
421 MS_LOG(ERROR) << "param is nullptr.";
422 return nullptr;
423 }
424 auto impl = static_cast<mindspore::ModelC *>(model);
425 size_t output_num;
426 auto outputs = impl->GetOutputs(&output_num);
427 for (size_t i = 0; i < output_num; i++) {
428 if (outputs[i]->Name() == tensor_name) {
429 return static_cast<OH_AI_TensorHandle>(outputs[i]);
430 }
431 }
432 MS_LOG(ERROR) << "tensor is not exist.";
433 return nullptr;
434 }
435