/**
 * Copyright 2021-2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/litert/cxx_api/model/model_impl.h"
#include <memory>
#include <algorithm>
#include <cstring>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "include/api/types.h"
#include "include/api/context.h"
#include "src/litert/inner_allocator.h"
#include "src/litert/cxx_api/converters.h"
#include "src/litert/cxx_api/graph/graph_data.h"
#include "src/litert/cxx_api/tensor/tensor_impl.h"
#include "src/litert/cxx_api/tensor_utils.h"
#include "src/common/log_adapter.h"
#include "src/litert/lite_session.h"
#include "src/litert/model_manager.h"
#include "src/common/file_utils.h"
#if defined(ENABLE_PRE_INFERENCE) && defined(__linux__) && !defined(Debug)
#include "src/common/random_data_generator.h"
#endif
#include "src/common/config_file.h"
#include "src/litert/cpu_info.h"
#include "src/litert/pack_weight_manager.h"
#ifdef SUPPORT_NNRT_METAGRAPH
#include "src/litert/cache_session.h"
#include "src/litert/delegate/nnrt/extension_options_parser.h"
#endif

namespace mindspore {
namespace {
const char *const kExecutionPlan = "execution_plan";
constexpr size_t kMaxSectionNum = 100;
constexpr size_t kMaxConfigNumPerSection = 1000;
constexpr auto kSharingWorkspaceSection = "inner_common";  // not supported for external user configuration
constexpr auto kSharingWorkspaceKey = "inner_sharing_workspace";
constexpr auto kSharingWorkspaceValue = "true";
constexpr auto kBuildSection = "build_session";
constexpr auto kObfRatioKey = "obf_ratio";
constexpr auto kObfNodeName = "obf_op-obf_mul";
constexpr size_t kFloatSize = 4;
constexpr int kDataIndex = 1;
#if defined(ENABLE_PRE_INFERENCE) && defined(__linux__) && !defined(Debug)
constexpr auto kCommonSection = "common";  // supports external user configuration
constexpr auto kEnablePreInferenceKey = "enable_pre_inference";
constexpr auto kEnablePreInferenceValue = "true";
#endif
}  // namespace
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;

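// A model path or buffer that has been registered with ModelManager is eligible for model sharing;
// Build() then turns on the inner workspace-sharing switch for the sessions created from it.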
bool ModelImpl::IsEnableModelSharing(const std::string &model_path) {
  const std::set<std::string> &model_path_set = ModelManager::GetInstance().GetModelPath();
  return (model_path_set.find(model_path) != model_path_set.end());
}

bool ModelImpl::IsEnableModelSharing(const std::pair<const void *, size_t> &model_buff) {
  const std::set<std::pair<const void *, size_t>> &model_buff_set = ModelManager::GetInstance().GetModelBuff();
  return (model_buff_set.find(model_buff) != model_buff_set.end());
}

CreateTrainSessionProto *CreateTrainSessionCallbackHolder(CreateTrainSessionProto *proto) {
  static CreateTrainSessionProto *proto_ = nullptr;
  if (proto != nullptr) {
    proto_ = proto;
  }
  return proto_;
}

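// Pre-inference support: BuildAndRun builds the model and then runs a single inference with randomly
// generated input data; inputs with an empty shape, zero data size, or a -1 (dynamic) dimension skip the run.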
#if defined(ENABLE_PRE_INFERENCE) && defined(__linux__) && !defined(Debug)
Status ModelImpl::BuildAndRun(const void *model_data, size_t data_size, ModelType model_type,
                              const std::shared_ptr<Context> &model_context) {
  Status ret = this->Build(model_data, data_size, model_type, model_context);
  if (ret != kSuccess) {
    return ret;
  }
  for (auto &tensor : this->GetInputs()) {
    if (tensor.Shape().empty() || tensor.DataSize() == 0 ||
        std::find(tensor.Shape().begin(), tensor.Shape().end(), -1) != tensor.Shape().end()) {
      return kSuccess;
    }
    auto status = lite::GenRandomData(&tensor);
    if (status != RET_OK) {
      return kLiteError;
    }
  }
  ret = this->Predict(nullptr, nullptr);
  if (ret != kSuccess) {
    return ret;
  }
  return kSuccess;
}

Status ModelImpl::BuildAndRun(const std::string &model_path, ModelType model_type,
                              const std::shared_ptr<Context> &model_context) {
  Status ret = this->Build(model_path, model_type, model_context);
  if (ret != kSuccess) {
    return ret;
  }
  for (auto &tensor : this->GetInputs()) {
    if (tensor.Shape().empty() || tensor.DataSize() == 0 ||
        std::find(tensor.Shape().begin(), tensor.Shape().end(), -1) != tensor.Shape().end()) {
      return kSuccess;
    }
    auto status = lite::GenRandomData(&tensor);
    if (status != RET_OK) {
      return kLiteError;
    }
  }
  ret = this->Predict(nullptr, nullptr);
  if (ret != kSuccess) {
    return ret;
  }
  return kSuccess;
}

Status ModelImpl::BuildAndRun() {
  Status ret = this->Build();
  if (ret != kSuccess) {
    return ret;
  }
  for (auto &tensor : this->GetInputs()) {
    if (tensor.Shape().empty() || tensor.DataSize() == 0 ||
        std::find(tensor.Shape().begin(), tensor.Shape().end(), -1) != tensor.Shape().end()) {
      return kSuccess;
    }
    auto status = lite::GenRandomData(&tensor);
    if (status != RET_OK) {
      return kLiteError;
    }
  }
  ret = this->Predict(nullptr, nullptr);
  if (ret != kSuccess) {
    return ret;
  }
  return kSuccess;
}

bool ModelImpl::IsEnablePreInference() {
  if (config_info_.find(kCommonSection) == config_info_.end()) {
    return true;
  }
  auto common_config = config_info_.at(kCommonSection);
  if (common_config.find(kEnablePreInferenceKey) == common_config.end()) {
    return true;
  }
  return common_config.at(kEnablePreInferenceKey) == kEnablePreInferenceValue;
}
#endif
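// Build from an in-memory model buffer: validates the arguments and the CPU instruction set, enables
// workspace sharing for registered buffers, then creates a lite session and compiles the model from the buffer.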
Status ModelImpl::Build(const void *model_data, size_t data_size, ModelType model_type,
                        const std::shared_ptr<Context> &ms_context) {
  if (session_ != nullptr) {
    MS_LOG(ERROR) << "Model has already been built.";
    return kLiteModelRebuild;
  }
  if (model_data == nullptr) {
    MS_LOG(ERROR) << "The input model buffer is nullptr.";
    return kLiteNullptr;
  }
  if (data_size == 0) {
    MS_LOG(ERROR) << "The input model buffer size is 0.";
    return kLiteInputParamInvalid;
  }
  if (!PlatformInstructionSetSupportCheck()) {
    MS_LOG(ERROR) << "The platform does not support the required instruction set.";
    return kLiteNotSupport;
  }

  context_ = ms_context;
  bool model_sharing_flag = IsEnableModelSharing(std::make_pair(model_data, data_size));
  if (model_sharing_flag) {
    auto ret = UpdateConfig(kSharingWorkspaceSection, std::make_pair(kSharingWorkspaceKey, kSharingWorkspaceValue));
    if (ret != kSuccess) {
      MS_LOG(ERROR) << "UpdateConfig " << kSharingWorkspaceKey << " failed.";
      return ret;
    }
  }
  auto session = std::shared_ptr<lite::LiteSession>(CreateLiteSession(ContextUtils::Convert(ms_context.get())));
  if (session == nullptr) {
    MS_LOG(ERROR) << "Allocate session failed.";
    return kLiteNullptr;
  }

  auto ret = session->LoadModelAndCompileByBuf(static_cast<const char *>(model_data), model_type, data_size);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Init session failed.";
    return kLiteError;
  }

  session_.swap(session);
  MS_LOG(DEBUG) << "Build model success.";
  return kSuccess;
}

Status ModelImpl::Build(const std::string &model_path, ModelType model_type,
                        const std::shared_ptr<Context> &ms_context) {
  if (session_ != nullptr) {
    MS_LOG(ERROR) << "Model has already been built.";
    return kLiteModelRebuild;
  }
  if (!PlatformInstructionSetSupportCheck()) {
    MS_LOG(ERROR) << "The platform does not support the required instruction set.";
    return kLiteNotSupport;
  }

  bool model_sharing_flag = IsEnableModelSharing(model_path);
  if (model_sharing_flag) {
    auto ret = UpdateConfig(kSharingWorkspaceSection, std::make_pair(kSharingWorkspaceKey, kSharingWorkspaceValue));
    if (ret != kSuccess) {
      MS_LOG(ERROR) << "UpdateConfig " << kSharingWorkspaceKey << " failed.";
      return ret;
    }
  }
  auto session = std::shared_ptr<lite::LiteSession>(CreateLiteSession(ContextUtils::Convert(ms_context.get())));
  if (session == nullptr) {
    MS_LOG(ERROR) << "Allocate session failed.";
    return kLiteNullptr;
  }

  auto ret = session->LoadModelAndCompileByPath(model_path, model_type);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Init session failed.";
    return kLiteError;
  }

  session_.swap(session);
  MS_LOG(DEBUG) << "Build model success.";
  return kSuccess;
}

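// Build from a previously loaded Graph: a registered train-session callback takes precedence; otherwise the
// lite model is compiled through a LiteSession, with the model buffer temporarily swapped to a shared copy
// obtained from PackWeightManager so that packed constant weights can be shared between sessions.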
Status ModelImpl::Build() {
  if (session_ != nullptr) {
    MS_LOG(ERROR) << "Model has already been built.";
    return kLiteModelRebuild;
  }
  MS_LOG(DEBUG) << "Start build model.";
  if (graph_ == nullptr || graph_->graph_data_ == nullptr) {
    MS_LOG(ERROR) << "Invalid graph.";
    return kLiteNullptr;
  }

  if (context_ == nullptr) {
    MS_LOG(ERROR) << "Invalid context.";
    return kLiteNullptr;
  }

  if (!PlatformInstructionSetSupportCheck()) {
    MS_LOG(ERROR) << "The platform does not support the required instruction set.";
    return kLiteNotSupport;
  }

  auto inner_context = ContextUtils::Convert(context_.get());
  if (inner_context == nullptr) {
    MS_LOG(ERROR) << "Failed to convert Context to Lite Context.";
    return kLiteNullptr;
  }

  auto create_callback = CreateTrainSessionCallbackHolder();
  if (create_callback != nullptr) {
    auto train_context = ContextUtils::Convert(context_.get());
    if (train_context == nullptr) {
      MS_LOG(ERROR) << "Failed to convert Context to Lite Context for train.";
      return kLiteNullptr;
    }

    auto session = create_callback(graph_->graph_data_, cfg_, train_context);
    if (session != nullptr) {
      session_ = session;
      MS_LOG(DEBUG) << "Build model success.";
      auto ret_obf = ModelDeObfuscate();
      if (ret_obf != RET_OK) {
        MS_LOG(ERROR) << "Model deobfuscate failed.";
        return kLiteError;
      }
      return kSuccess;
    }
  }

  auto model = graph_->graph_data_->lite_model();
  if (model == nullptr || model->buf == nullptr) {
    MS_LOG(ERROR) << "Lite model has been freed.";
    return kLiteError;
  }

  auto session = std::shared_ptr<lite::LiteSession>(CreateLiteSession(inner_context));
  if (session == nullptr) {
    MS_LOG(ERROR) << "Allocate session failed.";
    return kLiteNullptr;
  }
  std::string model_id;
  std::string runner_id;
  auto model_buf = model->buf;
  auto model_size = model->buf_size_;
  auto is_shared_weight = false;
  auto status = lite::PackWeightManager::GetInstance()->InitPackWeightManager(model_buf, model_size, &model_id,
                                                                              &runner_id, &config_info_);
  if (status != RET_OK) {
    MS_LOG(ERROR) << "InitPackWeightManager failed.";
    return kLiteError;
  }
  // The shared buffer is freed by PackWeight, not here.
  auto new_model_buf =
    lite::PackWeightManager::GetInstance()->GetSharedModelBuf(model_buf, model_id, &config_info_, &is_shared_weight);
  if (new_model_buf == nullptr) {
    MS_LOG(ERROR) << "Shared model buffer is nullptr.";
    return kLiteNullptr;
  }
  model->buf = new_model_buf;
  session->SetModelId(model_id);
  auto ret = session->CompileGraph(model.get());
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Build model failed.";
    return static_cast<StatusCode>(ret);
  }
  // Restore the original buffer so the model keeps ownership of its own memory.
  model->buf = model_buf;
  session_.swap(session);
  MS_LOG(DEBUG) << "Build model success.";
  auto ret_obf = ModelDeObfuscate();
  if (ret_obf != RET_OK) {
    MS_LOG(ERROR) << "Model deobfuscate failed.";
    return kLiteError;
  }
  return kSuccess;
}

static void ResetTensorData(std::vector<void *> old_data, const std::vector<lite::Tensor *> &tensors) {
  for (size_t j = 0; j < old_data.size(); j++) {
    tensors.at(j)->set_data(old_data.at(j), tensors.at(j)->own_data());
  }
}

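// RunGraph adapts the public MSKernelCallBack callbacks to lite::KernelCallBack, converting the lite tensors
// to MSTensor views before invoking the user-supplied before/after hooks.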
Status ModelImpl::RunGraph(const MSKernelCallBack &before, const MSKernelCallBack &after) {
  lite::KernelCallBack before_call_back = nullptr;
  lite::KernelCallBack after_call_back = nullptr;
  if (before != nullptr) {
    before_call_back = [&](const std::vector<mindspore::lite::Tensor *> &before_inputs,
                           const std::vector<mindspore::lite::Tensor *> &before_outputs,
                           const MSCallBackParam &call_param) {
      std::vector<MSTensor> inputs = LiteTensorsToMSTensors(before_inputs);
      std::vector<MSTensor> outputs = LiteTensorsToMSTensors(before_outputs);
      return before(inputs, outputs, call_param);
    };
  }

  if (after != nullptr) {
    after_call_back = [&](const std::vector<mindspore::lite::Tensor *> &before_inputs,
                          const std::vector<mindspore::lite::Tensor *> &before_outputs,
                          const MSCallBackParam &call_param) {
      std::vector<MSTensor> inputs = LiteTensorsToMSTensors(before_inputs);
      std::vector<MSTensor> outputs = LiteTensorsToMSTensors(before_outputs);
      return after(inputs, outputs, call_param);
    };
  }
  auto ret = session_->RunGraph(before_call_back, after_call_back);
  return static_cast<StatusCode>(ret);
}

bool ModelImpl::IsTrainModel() { return (graph_ && graph_->graph_data_ && graph_->graph_data_->IsTrainModel()); }

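// LoadConfig expects an INI-style file of [section] headers followed by key=value lines, for example
// (illustrative values only):
//   [execution_plan]
//   op_name1=data_type:float16
//   [common]
//   enable_pre_inference=true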
Status ModelImpl::LoadConfig(const std::string &config_path) {
  if (session_ != nullptr) {
    MS_LOG(ERROR) << "Model has already been built; please call LoadConfig before Build.";
    return kLiteError;
  }
  std::map<std::string, std::map<std::string, std::string>> all_config_info;
  int ret = lite::GetAllSectionInfoFromConfigFile(config_path, &all_config_info);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "GetAllSectionInfoFromConfigFile failed, ret: " << ret;
    return kLiteFileError;
  }
  config_info_ = all_config_info;
  std::map<std::string, std::string> config_info = all_config_info[kExecutionPlan];
  if (config_info.empty()) {
    MS_LOG(WARNING) << "No valid execution plan info in config file.";
    return kSuccess;
  }

  lite::ParserExecutionPlan(&config_info, &execution_plan_);
  return kSuccess;
}

Status ModelImpl::UpdateConfig(const std::string &section, const std::pair<std::string, std::string> &config) {
  auto iter = config_info_.find(section);
  if (iter == config_info_.end()) {
    if (config_info_.size() >= kMaxSectionNum) {
      MS_LOG(ERROR) << "Too many config sections!";
      return kLiteError;
    }
    config_info_[section][config.first] = config.second;
    return kSuccess;
  }
  if (iter->second.size() >= kMaxConfigNumPerSection) {
    MS_LOG(ERROR) << "Too many config items in the section!";
    return kLiteError;
  }
  iter->second[config.first] = config.second;
  return kSuccess;
}

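// Predict with user-provided inputs and outputs: user input buffers are temporarily bound to the session's
// input tensors (zero-copy when the allocators match, otherwise copied), and the original data pointers are
// restored after the run; user output buffers are likewise used in place when possible, otherwise the model's
// outputs are copied back into them.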
Status ModelImpl::Predict(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs,
                          const MSKernelCallBack &before, const MSKernelCallBack &after) {
  MS_CHECK_TRUE_MSG(session_ != nullptr, kLiteNullptr, "Model has not been built, or Model Build has failed.");
  MS_CHECK_TRUE_MSG(outputs != nullptr, kLiteError, "outputs is nullptr.");
  auto input_tensors = session_->GetInputs();
  if (input_tensors.empty()) {
    MS_LOG(ERROR) << "Failed to get input tensor.";
    return kLiteError;
  }
  if (input_tensors.size() != inputs.size()) {
    MS_LOG(ERROR) << "Wrong input size.";
    return kLiteError;
  }
  std::vector<void *> old_data;
  for (size_t i = 0; i < inputs.size(); i++) {
    old_data.push_back(input_tensors.at(i)->data());
  }
  for (size_t i = 0; i < inputs.size(); i++) {
    auto input = input_tensors.at(i);
    auto user_input = inputs.at(i);
    if (user_input.DataType() != static_cast<enum DataType>(input->data_type())) {
      ResetTensorData(old_data, input_tensors);
      MS_LOG(ERROR) << "Tensor " << user_input.Name() << " has a different data type from input tensor "
                    << input->tensor_name() << ".";
      return kLiteInputTensorError;
    }
    if (user_input.Data() == nullptr) {
      ResetTensorData(old_data, input_tensors);
      MS_LOG(ERROR) << "Tensor " << user_input.Name() << " has no data.";
      return kLiteInputTensorError;
    }
    if (user_input.Name() != input->tensor_name() && user_input.Name() != "MindDataTensor") {
      MS_LOG(WARNING) << "Tensor " << user_input.Name() << " has a different name from input tensor "
                      << input->tensor_name() << ".";
    }
    if (input->data_type() == kObjectTypeString) {
#ifndef STRING_KERNEL_CLIP
      std::vector<int32_t> shape = TruncateShape(user_input.Shape(), input->data_type(), user_input.DataSize(), false);
      if (shape.empty() && !(user_input.Shape().empty())) {
        ResetTensorData(old_data, input_tensors);
        MS_LOG(ERROR) << "Input dims of tensor " << user_input.Name() << " is invalid.";
        return kLiteParamInvalid;
      }
      input->set_shape(shape);
      input->set_data(user_input.MutableData());
#else
      MS_LOG(ERROR) << unsupport_string_tensor_log;
      return kLiteError;
#endif
    } else {
      if (user_input.MutableData() != input->data()) {
        if (input->Size() != user_input.DataSize()) {
          ResetTensorData(old_data, input_tensors);
#ifndef ENABLE_LITE_ACL
          MS_LOG(ERROR) << "Tensor " << user_input.Name() << " has wrong data size.";
          return kLiteInputTensorError;
#else
          MS_LOG(WARNING) << "Please check tensor " << user_input.Name()
                          << ": its data size may have been modified by the DVPP method.";
          std::vector<int> truncate_shape = {static_cast<int>(user_input.DataSize())};
          input->set_shape(truncate_shape);
#endif
        }
        if (user_input.allocator() == input->allocator()) {
          input->set_data(user_input.MutableData());
          input->set_own_data(false);
        } else {
          void *user_data = user_input.MutableData();
          if (user_data == nullptr) {
            MS_LOG(ERROR) << "user data is nullptr";
            return kLiteNullptr;
          }
          void *input_data = input->MutableData();
          if (input_data == nullptr) {
            MS_LOG(ERROR) << "input data is nullptr";
            return kLiteNullptr;
          }
          memcpy(input_data, user_data, input->Size());
        }
      }
    }
  }

  auto ori_output_tensors = GetOutputs();
  std::vector<bool> copy_output_data;
  copy_output_data.resize(ori_output_tensors.size(), false);
  if (outputs->empty()) {
    MS_LOG(INFO) << "user provided output is empty";
  } else if (outputs->size() != ori_output_tensors.size()) {
    MS_LOG(ERROR) << "user provided output size is not equal to model's output size";
    return kLiteError;
  } else {
    for (size_t i = 0; i < ori_output_tensors.size(); i++) {
      auto ori_output = ori_output_tensors[i];
      auto lite_impl = std::static_pointer_cast<LiteTensorImpl>(ori_output.impl());
      MS_CHECK_TRUE_RET(lite_impl != nullptr, kLiteNullptr);
      auto ori_out_tensor = static_cast<lite::Tensor *>(lite_impl->lite_tensor());
      MS_CHECK_TRUE_RET(ori_out_tensor != nullptr, kLiteNullptr);

      auto user_output = (*outputs)[i];
      auto user_lite_impl = std::static_pointer_cast<LiteTensorImpl>(user_output.impl());
      MS_CHECK_TRUE_RET(user_lite_impl != nullptr, kLiteNullptr);
      auto user_out_tensor = user_lite_impl->lite_tensor();
      if (ori_out_tensor == user_out_tensor) {
        continue;
      }

      void *user_out_data = nullptr;
      if (user_output.DataSize() > 0) {
        user_out_data = user_output.MutableData();
      }
      if (ori_out_tensor->allocator() == user_output.allocator() && user_out_data != nullptr) {
        MS_LOG(INFO) << "use user data";
        ori_out_tensor->set_data(user_out_data);
        ori_out_tensor->set_own_data(false);
      } else {
        copy_output_data[i] = true;
      }
    }
  }

  auto ret = RunGraph(before, after);
  ResetTensorData(old_data, input_tensors);
  if (ret != kSuccess) {
    MS_LOG(ERROR) << "Run graph failed.";
    return ret;
  }
  MS_LOG(DEBUG) << "Run graph success.";

  for (size_t i = 0; i < copy_output_data.size(); i++) {
    if (!copy_output_data[i]) {
      continue;
    }
    auto ori_output = ori_output_tensors[i];
    auto ori_out_data = ori_output.MutableData();
    MS_CHECK_TRUE_RET(ori_out_data != nullptr, kLiteNullptr);
    auto user_output = (*outputs)[i];
    MS_CHECK_TRUE_RET(user_output.MutableData() != nullptr, kLiteNullptr);
    if (user_output.DataSize() >= ori_output.DataSize()) {
      memcpy(user_output.MutableData(), ori_out_data, ori_output.DataSize());
    } else {
      MS_LOG(ERROR) << "user out data size is less than model's output data size";
      return kLiteError;
    }
  }

  if (outputs->size() == ori_output_tensors.size()) {
    return kSuccess;
  }

  auto res = GetOutputs();
  if (res.empty()) {
    MS_LOG(DEBUG) << "Empty outputs.";
    return kLiteError;
  }
  outputs->clear();
  outputs->insert(outputs->end(), res.begin(), res.end());
  return kSuccess;
}

Status ModelImpl::Predict(const MSKernelCallBack &before, const MSKernelCallBack &after) {
  if (session_ == nullptr) {
    MS_LOG(ERROR) << "Model has not been built, or Model Build has failed.";
    return kLiteNullptr;
  }
  auto input_tensors = session_->GetInputs();
  if (input_tensors.empty()) {
    MS_LOG(ERROR) << "Failed to get input tensor.";
    return kLiteError;
  }

  for (auto &input : input_tensors) {
    if (input->data() == nullptr) {
      MS_LOG(ERROR) << "Tensor " << input->tensor_name() << " has no data.";
      return kLiteInputTensorError;
    }
  }
  auto ret = RunGraph(before, after);
  if (ret != kSuccess) {
    MS_LOG(ERROR) << "Run graph failed: " << ret;
    return ret;
  }
  MS_LOG(DEBUG) << "Run graph success.";
  return kSuccess;
}

std::vector<MSTensor> ModelImpl::GetInputs() {
  if (session_ == nullptr) {
    MS_LOG(ERROR) << "Model has not been built, or Model Build has failed.";
    return {};
  }
  std::vector<MSTensor> res;
  auto inputs = session_->GetInputs();
  if (inputs.empty()) {
    MS_LOG(ERROR) << "The inputs of the model are null.";
    return {};
  }
  res.resize(inputs.size());
  for (size_t i = 0; i < inputs.size(); i++) {
    auto impl = std::make_shared<LiteTensorImpl>(inputs[i]);
    if (impl == nullptr || impl->lite_tensor() == nullptr) {
      MS_LOG(ERROR) << "Create tensor failed.";
      return {};
    }

    res[i] = MSTensor(impl);
  }
  return res;
}

std::vector<MSTensor> ModelImpl::GetOutputs() {
  if (session_ == nullptr) {
    MS_LOG(ERROR) << "Model has not been built, or Model Build has failed.";
    return {};
  }
  std::vector<MSTensor> res;
  auto names = session_->GetOutputTensorNames();
  if (names.empty()) {
    MS_LOG(ERROR) << "The output tensor names of this model are null.";
    return {};
  }
  auto outputs = session_->GetOutputs();
  if (outputs.empty()) {
    MS_LOG(ERROR) << "The outputs of the model are null.";
    return {};
  }
  if (names.size() != outputs.size()) {
    MS_LOG(ERROR) << "The size of outputs does not match the size of names.";
    return {};
  }
  res.resize(names.size());
  for (size_t i = 0; i < names.size(); i++) {
    auto impl = std::make_shared<LiteTensorImpl>(outputs[names[i]]);
    if (impl == nullptr || impl->lite_tensor() == nullptr) {
      MS_LOG(ERROR) << "Create tensor failed.";
      return {};
    }
    auto tensor = MSTensor(impl);
    if (tensor == nullptr) {
      MS_LOG(ERROR) << "Create tensor failed.";
      return {};
    }
    res[i] = tensor;
  }
  return res;
}

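// Training-related accessors and mutators: all of them require a successfully built session and simply
// convert between the public MSTensor wrappers and the underlying lite::Tensor objects.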
std::vector<MSTensor> ModelImpl::GetGradients() const {
  if (session_ == nullptr) {
    MS_LOG(ERROR) << "Model has not been built, or Model Build has failed.";
    return {};
  }
  auto params = session_->GetGradients();
  if (params.empty()) {
    MS_LOG(ERROR) << "No gradients available.";
    return {};
  }
  std::vector<MSTensor> res = LiteTensorsToMSTensors(params, false);
  return res;
}

Status ModelImpl::ApplyGradients(const std::vector<MSTensor> &gradients) {
  if (session_ == nullptr) {
    MS_LOG(ERROR) << "Model has not been built, or Model Build has failed.";
    return kLiteNullptr;
  }
  if (gradients.empty()) {
    MS_LOG(ERROR) << "Gradients are empty.";
    return kLiteInputParamInvalid;
  }
  std::vector<lite::Tensor *> inner_gradients;
  inner_gradients.resize(gradients.size());
  for (size_t i = 0; i < gradients.size(); i++) {
    auto gradient = gradients[i];
    if (gradient.impl_ == nullptr) {
      MS_LOG(ERROR) << "Gradient tensor " << gradient.Name() << " is null.";
      return kLiteInputTensorError;
    }
    auto lite_impl = std::static_pointer_cast<LiteTensorImpl>(gradient.impl_);
    if (lite_impl == nullptr || lite_impl->lite_tensor() == nullptr) {
      MS_LOG(ERROR) << "Gradient tensor " << gradient.Name() << " is null.";
      return kLiteInputTensorError;
    }
    inner_gradients[i] = lite_impl->lite_tensor();
  }
  auto ret = session_->ApplyGradients(inner_gradients);
  return static_cast<StatusCode>(ret);
}

std::vector<MSTensor> ModelImpl::GetFeatureMaps() const {
  if (session_ == nullptr) {
    MS_LOG(ERROR) << "Model has not been built, or Model Build has failed.";
    return {};
  }
  auto params = session_->GetFeatureMaps();
  if (params.empty()) {
    MS_LOG(ERROR) << "No feature maps available.";
    return {};
  }
  std::vector<MSTensor> res = LiteTensorsToMSTensors(params, true);
  return res;
}

std::vector<MSTensor> ModelImpl::GetTrainableParams() const {
  if (session_ == nullptr) {
    MS_LOG(ERROR) << "Model has not been built, or Model Build has failed.";
    return {};
  }
  auto params = session_->GetTrainableParams();
  if (params.empty()) {
    MS_LOG(ERROR) << "No trainable parameters available.";
    return {};
  }
  std::vector<MSTensor> res = LiteTensorsToMSTensors(params, true);
  return res;
}

Status ModelImpl::UpdateFeatureMaps(const std::vector<MSTensor> &new_weights) {
  if (session_ == nullptr) {
    MS_LOG(ERROR) << "Model has not been built, or Model Build has failed.";
    return kLiteNullptr;
  }
  if (new_weights.empty()) {
    MS_LOG(ERROR) << "New weights are empty.";
    return kLiteInputParamInvalid;
  }
  std::vector<lite::Tensor *> inner_weights;
  inner_weights.resize(new_weights.size());
  for (size_t i = 0; i < new_weights.size(); i++) {
    auto new_weight = new_weights[i];
    if (new_weight.impl_ == nullptr) {
      MS_LOG(ERROR) << "Weight tensor " << new_weight.Name() << " is null.";
      return kLiteInputTensorError;
    }
    auto lite_impl = std::static_pointer_cast<LiteTensorImpl>(new_weight.impl_);
    if (lite_impl == nullptr || lite_impl->lite_tensor() == nullptr) {
      MS_LOG(ERROR) << "Weight tensor " << new_weight.Name() << " is null.";
      return kLiteInputTensorError;
    }
    inner_weights[i] = lite_impl->lite_tensor();
  }
  auto ret = session_->UpdateFeatureMaps(inner_weights);
  return static_cast<StatusCode>(ret);
}

std::vector<MSTensor> ModelImpl::GetOptimizerParams() const {
  if (session_ == nullptr) {
    MS_LOG(ERROR) << "Model has not been built, or Model Build has failed.";
    return {};
  }
  auto params = session_->GetOptimizerParams();
  if (params.empty()) {
    MS_LOG(ERROR) << "No optimizer parameters available.";
    return {};
  }
  std::vector<MSTensor> res = LiteTensorsToMSTensors(params);
  return res;
}

Status ModelImpl::SetOptimizerParams(const std::vector<MSTensor> &params) {
  if (session_ == nullptr) {
    MS_LOG(ERROR) << "Model has not been built, or Model Build has failed.";
    return kLiteNullptr;
  }
  if (params.empty()) {
    MS_LOG(ERROR) << "Params are empty.";
    return kLiteInputParamInvalid;
  }
  std::vector<lite::Tensor *> inner_params;
  inner_params.resize(params.size());
  for (size_t i = 0; i < params.size(); i++) {
    auto param = params[i];
    if (param.impl_ == nullptr) {
      MS_LOG(ERROR) << "Param tensor " << param.Name() << " is null.";
      return kLiteInputTensorError;
    }
    auto lite_impl = std::static_pointer_cast<LiteTensorImpl>(param.impl_);
    if (lite_impl == nullptr || lite_impl->lite_tensor() == nullptr) {
      MS_LOG(ERROR) << "Param tensor " << param.Name() << " is null.";
      return kLiteInputTensorError;
    }
    inner_params[i] = lite_impl->lite_tensor();
  }
  auto ret = session_->SetOptimizerParams(inner_params);
  return static_cast<StatusCode>(ret);
}

MSTensor ModelImpl::GetInputByTensorName(const std::string &name) {
  if (session_ == nullptr) {
    MS_LOG(ERROR) << "Model has not been built, or Model Build has failed.";
    return MSTensor(nullptr);
  }
  auto res = session_->GetInputsByTensorName(name);
  if (res == nullptr) {
    MS_LOG(ERROR) << "Model does not contain tensor " << name << ".";
    return MSTensor(nullptr);
  }
  auto impl = std::make_shared<LiteTensorImpl>(res);
  if (impl == nullptr || impl->lite_tensor() == nullptr) {
    MS_LOG(ERROR) << "Create tensor failed.";
    return MSTensor(nullptr);
  }

  return MSTensor(impl);
}

std::vector<std::string> ModelImpl::GetOutputTensorNames() {
  if (session_ == nullptr) {
    MS_LOG(ERROR) << "Model has not been built, or Model Build has failed.";
    return {};
  }
  return session_->GetOutputTensorNames();
}

MSTensor ModelImpl::GetOutputByTensorName(const std::string &name) {
  if (session_ == nullptr) {
    MS_LOG(ERROR) << "Model has not been built, or Model Build has failed.";
    return MSTensor(nullptr);
  }
  auto res = session_->GetOutputByTensorName(name);
  if (res == nullptr) {
    MS_LOG(ERROR) << "Model does not contain tensor " << name << ".";
    return MSTensor(nullptr);
  }
  auto impl = std::make_shared<LiteTensorImpl>(res);
  if (impl == nullptr || impl->lite_tensor() == nullptr) {
    MS_LOG(ERROR) << "Create tensor failed.";
    return MSTensor(nullptr);
  }

  return MSTensor(impl);
}

std::vector<MSTensor> ModelImpl::GetOutputsByNodeName(const std::string &name) {
  if (session_ == nullptr) {
    MS_LOG(ERROR) << "Model has not been built, or Model Build has failed.";
    return {};
  }
  std::vector<MSTensor> res;
  auto outputs = session_->GetOutputsByNodeName(name);
  if (outputs.empty()) {
    MS_LOG(ERROR) << "The outputs of the model are null.";
    return {};
  }
  res.resize(outputs.size());
  for (size_t i = 0; i < outputs.size(); i++) {
    auto impl = std::make_shared<LiteTensorImpl>(outputs[i]);
    if (impl == nullptr || impl->lite_tensor() == nullptr) {
      MS_LOG(ERROR) << "Create tensor failed.";
      return {};
    }
    auto tensor = MSTensor(impl);
    if (tensor == nullptr) {
      MS_LOG(ERROR) << "Create tensor failed.";
      return {};
    }
    res[i] = tensor;
  }
  return res;
}

Status ModelImpl::BindGLTexture2DMemory(const std::map<std::string, unsigned int> &inputGLTexture,
                                        std::map<std::string, unsigned int> *outputGLTexture) {
  MS_LOG(INFO) << "Bind GLTexture2D to input MSTensors and output MSTensors.";
  if (session_ == nullptr) {
    MS_LOG(ERROR) << "Model has not been built, or Model Build has failed.";
    return kLiteError;
  }
  auto status = session_->BindGLTexture2DMemory(inputGLTexture, outputGLTexture);
  if (status != RET_OK) {
    MS_LOG(ERROR) << "Bind OpenGL texture to OpenCL memory failed.";
    return kLiteError;
  }
  return kSuccess;
}

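// Resize validates the requested shapes against the model inputs and truncates the 64-bit dims to the
// 32-bit shapes expected by the lite session before delegating to session_->Resize().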
Status ModelImpl::Resize(const std::vector<MSTensor> &inputs, const std::vector<std::vector<int64_t>> &dims) {
  if (session_ == nullptr) {
    MS_LOG(ERROR) << "Model has not been built, or Model Build has failed.";
    return kLiteNullptr;
  }
  if (inputs.empty()) {
    MS_LOG(ERROR) << "Inputs are empty.";
    return kLiteInputParamInvalid;
  }
  if (dims.empty()) {
    MS_LOG(ERROR) << "Dims are empty.";
    return kLiteInputParamInvalid;
  }
  if (inputs.size() != dims.size()) {
    MS_LOG(ERROR) << "The size of inputs does not match the size of dims.";
    return kLiteInputParamInvalid;
  }
  auto model_inputs = session_->GetInputs();
  if (model_inputs.empty()) {
    MS_LOG(ERROR) << "The inputs of the model are null.";
    return kLiteParamInvalid;
  }
  if (inputs.size() != model_inputs.size()) {
    MS_LOG(ERROR) << "The size of inputs is incorrect.";
    return kLiteInputParamInvalid;
  }
  std::vector<lite::Tensor *> inner_input;
  inner_input.resize(inputs.size());
  std::vector<std::vector<int32_t>> truncated_shape;
  truncated_shape.resize(inputs.size());
  for (size_t i = 0; i < inputs.size(); i++) {
    auto input = inputs[i];
    if (input.impl_ == nullptr) {
      MS_LOG(ERROR) << "Input tensor " << input.Name() << " is null.";
      return kLiteInputTensorError;
    }
    auto lite_impl = std::static_pointer_cast<LiteTensorImpl>(input.impl_);
    if (lite_impl == nullptr || lite_impl->lite_tensor() == nullptr) {
      MS_LOG(ERROR) << "Input tensor " << input.Name() << " is null.";
      return kLiteInputTensorError;
    }
    inner_input[i] = lite_impl->lite_tensor();
    std::vector<int32_t> shape = TruncateShape(dims[i], inner_input[i]->data_type(), inner_input[i]->Size(), false);
    if (shape.empty() && !(dims[i].empty())) {
      MS_LOG(ERROR) << "Input dims[" << i << "] is invalid.";
      return kLiteParamInvalid;
    }
    truncated_shape[i] = shape;
  }
  auto ret = session_->Resize(inner_input, truncated_shape);
  return static_cast<StatusCode>(ret);
}

Status ModelImpl::UpdateWeights(const std::vector<MSTensor> &new_weights) {
  if (session_ == nullptr) {
    MS_LOG(ERROR) << "Model has not been built, or Model Build has failed.";
    return kLiteNullptr;
  }
  if (new_weights.empty()) {
    MS_LOG(ERROR) << "New weights are empty.";
    return kLiteInputParamInvalid;
  }
  std::vector<lite::Tensor *> inner_weights;
  inner_weights.resize(new_weights.size());
  for (size_t i = 0; i < new_weights.size(); i++) {
    auto weight = new_weights[i];
    if (weight.impl_ == nullptr) {
      MS_LOG(ERROR) << "Weight tensor " << weight.Name() << " is null.";
      return kLiteInputTensorError;
    }
    auto lite_impl = std::static_pointer_cast<LiteTensorImpl>(weight.impl_);
    if (lite_impl == nullptr || lite_impl->lite_tensor() == nullptr) {
      MS_LOG(ERROR) << "Weight tensor " << weight.Name() << " is null.";
      return kLiteInputTensorError;
    }
    inner_weights[i] = lite_impl->lite_tensor();
  }
  auto ret = session_->UpdateWeights(inner_weights);
  if (ret != kSuccess) {
    MS_LOG(ERROR) << "UpdateWeights failed, and the original weights may have been changed.";
  }
  return static_cast<StatusCode>(ret);
}

Status ModelImpl::SetupVirtualBatch(int virtual_batch_multiplier, float lr, float momentum) {
  if (session_ == nullptr) {
    MS_LOG(ERROR) << "Model has not been built, or Model Build has failed.";
    return kLiteNullptr;
  }
  auto ret = session_->SetupVirtualBatch(virtual_batch_multiplier, lr, momentum);
  return static_cast<StatusCode>(ret);
}

Status ModelImpl::SetLearningRate(float learning_rate) {
  if (session_ == nullptr) {
    MS_LOG(ERROR) << "Model has not been built, or Model Build has failed.";
    return kLiteNullptr;
  }
  auto ret = session_->SetLearningRate(learning_rate);
  return static_cast<StatusCode>(ret);
}

float ModelImpl::GetLearningRate() {
  if (session_ == nullptr) {
    MS_LOG(WARNING) << "Model has not been built, or Model Build has failed.";
    return 0.0;
  }
  return session_->GetLearningRate();
}

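// Session factory: when an NNRT device targets a Kirin NPU with online inference and a compiled model cache
// already exists (OH_NNModel_HasCache), a CacheSession is created; otherwise a plain LiteSession is used.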
lite::LiteSession *ModelImpl::CreateLiteSession(const std::shared_ptr<lite::InnerContext> &context) {
  if (context == nullptr) {
    MS_LOG(ERROR) << "context is nullptr";
    return nullptr;
  }
  lite::LiteSession *session = nullptr;
#ifdef SUPPORT_NNRT_METAGRAPH
  auto iter = std::find_if(context->device_list_.begin(), context->device_list_.end(),
                           [](lite::DeviceContext &device) { return device.device_type_ == lite::DT_NNRT; });
  if (iter != context->device_list_.end()) {
    const auto &nnrt_device_info = iter->device_info_.nnrt_device_info_;
    if (lite::CacheSession::IsKirinNPUWithOnlineInference(nnrt_device_info.device_id_)) {
      const auto &extensions = nnrt_device_info.extensions_;
      lite::nnrt::ExtensionOptions extension_options;
      mindspore::lite::nnrt::ExtensionOptionsParser::Parse(extensions, &extension_options);
      auto has_cache = OH_NNModel_HasCache(extension_options.cache_path_.c_str(), extension_options.model_name.c_str(),
                                           extension_options.cache_version_);
      if (has_cache) {
        session = reinterpret_cast<lite::LiteSession *>(new (std::nothrow) lite::CacheSession());
        if (session == nullptr) {
          MS_LOG(ERROR) << "create cache session failed";
          return nullptr;
        }
      }
    }
  }
#endif

  if (session == nullptr) {
    session = new (std::nothrow) lite::LiteSession();
  }
  if (session == nullptr) {
    MS_LOG(ERROR) << "create session failed";
    return nullptr;
  }
  session->InitExecutionConfig(&execution_plan_);
  session->SetConfigInfo(&config_info_);

  auto ret = session->Init(context);
  if (ret != mindspore::lite::RET_OK) {
    MS_LOG(ERROR) << "init session failed";
    delete session;
    return nullptr;
  }
  return session;
}

bool ModelImpl::IsValidDoubleNum(const std::string &num_str) {
  if (num_str.empty()) {
    return false;
  }
  std::istringstream iss(num_str);
  double d;
  iss >> std::noskipws >> d;
  return iss.eof() && !iss.fail();
}

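// Model deobfuscation: reads obf_ratio from the build_session section of the config, validates it, and writes
// the value into the weight tensor consumed by the obf_op-obf_mul node via UpdateWeights().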
int ModelImpl::ModelDeObfuscate() {
  float obf_ratio = -1.0;
  auto iter = config_info_.find(kBuildSection);
  if (iter != config_info_.end()) {
    auto item_runner = iter->second.find(kObfRatioKey);
    if (item_runner != iter->second.end()) {
      if (IsValidDoubleNum(iter->second.at(kObfRatioKey))) {
        float candidate_obf_ratio = std::stof(iter->second.at(kObfRatioKey));
        if (!lite::FloatCompare(candidate_obf_ratio, 1.0) && !lite::FloatCompare(candidate_obf_ratio, 0.0)) {
          // obtained a legal obf_ratio
          obf_ratio = candidate_obf_ratio;
        }
      } else {
        MS_LOG(ERROR) << "Obfuscation ratio should be a float but got " << iter->second.at(kObfRatioKey);
        return RET_ERROR;
      }
    }
  } else {
    MS_LOG(INFO) << "No obfuscation key found in config file.";
  }
  if (obf_ratio > 50.0) {
    MS_LOG(ERROR) << "Obfuscation ratio is greater than 50. Please set it within the range of (0, 50].";
    return RET_ERROR;
  }

  auto model = graph_->graph_data_->lite_model();
  std::string tensor_name = "";
  for (auto node : model->graph_.all_nodes_) {
    if (node->name_.find(kObfNodeName) != std::string::npos) {
      auto idx = node->input_indices_[kDataIndex];
      auto *tensor = model->graph_.all_tensors_[idx];
      if (tensor == nullptr) {
        MS_LOG(ERROR) << "Obfuscation tensor is null.";
        return RET_ERROR;
      }
      if (tensor->name() != nullptr) {
        tensor_name = tensor->name()->str();
      }
    }
  }
  if (tensor_name.empty()) {
    MS_LOG(INFO) << "Could not find the tensor corresponding to the obfuscation value.";
    return RET_OK;
  }
  MS_LOG(DEBUG) << "Found obfuscation value in tensor " << tensor_name;

  float data[1] = {obf_ratio};
  auto new_tensor =
    MSTensor::CreateTensor(tensor_name, mindspore::DataType::kNumberTypeFloat32, {1, 1}, data, kFloatSize);
  if (new_tensor == nullptr) {
    MS_LOG(ERROR) << "Create tensor failed.";
    return RET_ERROR;
  }
  std::vector<mindspore::MSTensor> modify_tensors;
  modify_tensors.emplace_back(*new_tensor);
  auto ret = this->UpdateWeights(modify_tensors);
  if (ret != kSuccess) {
    MS_LOG(ERROR) << "UpdateWeights failed.";
    return RET_ERROR;
  }
  return RET_OK;
}

Status ModelImpl::Finalize() {
  MS_LOG(INFO) << "Finalize is only supported for the mindspore_lite Ascend backend.";
  return kSuccess;
}

}  // namespace mindspore