/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <iostream>
#include <cstring>
#include <cstdlib>
#include <random>
#include <fstream>
#include <thread>
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "include/api/allocator.h"
#include "include/api/model.h"
#include "include/api/context.h"
#include "include/api/types.h"
#include "include/api/serialization.h"

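// Resolve `path` to an absolute, canonical path. Returns an empty string when the
// path is null, too long, or cannot be resolved.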
std::string RealPath(const char *path) {
  const size_t max = 4096;
  if (path == nullptr) {
    std::cerr << "path is nullptr" << std::endl;
    return "";
  }
  if ((strlen(path)) >= max) {
    std::cerr << "path is too long" << std::endl;
    return "";
  }
  auto resolved_path = std::make_unique<char[]>(max);
  if (resolved_path == nullptr) {
    std::cerr << "new resolved_path failed" << std::endl;
    return "";
  }
#ifdef _WIN32
  char *real_path = _fullpath(resolved_path.get(), path, 1024);
#else
  char *real_path = realpath(path, resolved_path.get());
#endif
  if (real_path == nullptr || strlen(real_path) == 0) {
    std::cerr << "file path is not valid : " << path << std::endl;
    return "";
  }
  std::string res = resolved_path.get();
  return res;
}

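// Read the whole file into a heap-allocated buffer. On success the caller owns the
// returned buffer (release it with delete[]) and *size holds the file length in bytes.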
char *ReadFile(const char *file, size_t *size) {
  if (file == nullptr) {
    std::cerr << "file is nullptr." << std::endl;
    return nullptr;
  }
  if (size == nullptr) {
    std::cerr << "size is nullptr." << std::endl;
    return nullptr;
  }

  std::ifstream ifs(file, std::ifstream::in | std::ifstream::binary);
  if (!ifs.good()) {
    std::cerr << "file: " << file << " does not exist." << std::endl;
    return nullptr;
  }

  if (!ifs.is_open()) {
    std::cerr << "file: " << file << " open failed." << std::endl;
    return nullptr;
  }

  ifs.seekg(0, std::ios::end);
  *size = ifs.tellg();
  std::unique_ptr<char[]> buf(new (std::nothrow) char[*size]);
  if (buf == nullptr) {
    std::cerr << "malloc buf failed, file: " << file << std::endl;
    ifs.close();
    return nullptr;
  }

  ifs.seekg(0, std::ios::beg);
  ifs.read(buf.get(), *size);
  ifs.close();

  return buf.release();
}

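// Fill `size` bytes of `data` with values of type T drawn from the given distribution.
// Used below to generate dummy input data for the model.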
template <typename T, typename Distribution>
void GenerateRandomData(int size, void *data, Distribution distribution) {
  if (data == nullptr) {
    std::cerr << "data is nullptr." << std::endl;
    return;
  }
  std::mt19937 random_engine;
  int elements_num = size / sizeof(T);
  (void)std::generate_n(static_cast<T *>(data), elements_num,
                        [&]() { return static_cast<T>(distribution(random_engine)); });
}

std::shared_ptr<mindspore::CPUDeviceInfo> CreateCPUDeviceInfo() {
  auto device_info = std::make_shared<mindspore::CPUDeviceInfo>();
  if (device_info == nullptr) {
    std::cerr << "New CPUDeviceInfo failed." << std::endl;
    return nullptr;
  }
  // Prefer float16 operators when they are available.
  device_info->SetEnableFP16(true);
  return device_info;
}

std::shared_ptr<mindspore::GPUDeviceInfo> CreateGPUDeviceInfo() {
  auto device_info = std::make_shared<mindspore::GPUDeviceInfo>();
  if (device_info == nullptr) {
    std::cerr << "New GPUDeviceInfo failed." << std::endl;
    return nullptr;
  }
  // When GPU device info is added to the context, the GPU becomes the preferred backend:
  // operators with a GPU implementation run on the GPU, the rest fall back to the CPU.
  // Prefer float16 operators on the GPU when they are available.
  device_info->SetEnableFP16(true);
  return device_info;
}

std::shared_ptr<mindspore::KirinNPUDeviceInfo> CreateNPUDeviceInfo() {
  auto device_info = std::make_shared<mindspore::KirinNPUDeviceInfo>();
  if (device_info == nullptr) {
    std::cerr << "New KirinNPUDeviceInfo failed." << std::endl;
    return nullptr;
  }
  // Frequency level 3 requests the high-performance NPU mode.
  device_info->SetFrequency(3);
  return device_info;
}

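// Fetch the model inputs via GetInputs() and fill the (single) input tensor with random data.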
mindspore::Status GetInputsAndSetData(mindspore::Model *model) {
  auto inputs = model->GetInputs();
  // The model has only one input tensor.
  auto in_tensor = inputs.front();
  if (in_tensor == nullptr) {
    std::cerr << "Input tensor is nullptr" << std::endl;
    return mindspore::kLiteNullptr;
  }
  auto input_data = in_tensor.MutableData();
  if (input_data == nullptr) {
    std::cerr << "MallocData for inTensor failed." << std::endl;
    return mindspore::kLiteNullptr;
  }
  GenerateRandomData<float>(in_tensor.DataSize(), input_data, std::uniform_real_distribution<float>(0.1f, 1.0f));
  return mindspore::kSuccess;
}

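// Look up the input tensor by name ("graph_input-173" in the sample model) and fill it with random data.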
mindspore::Status GetInputsByTensorNameAndSetData(mindspore::Model *model) {
  auto in_tensor = model->GetInputByTensorName("graph_input-173");
  if (in_tensor == nullptr) {
    std::cerr << "Input tensor is nullptr" << std::endl;
    return mindspore::kLiteNullptr;
  }
  auto input_data = in_tensor.MutableData();
  if (input_data == nullptr) {
    std::cerr << "MallocData for inTensor failed." << std::endl;
    return mindspore::kLiteNullptr;
  }
  GenerateRandomData<float>(in_tensor.DataSize(), input_data, std::uniform_real_distribution<float>(0.1f, 1.0f));
  return mindspore::kSuccess;
}

void GetOutputsByNodeName(mindspore::Model *model) {
  // The model has an output node named "Softmax-65".
  auto output_vec = model->GetOutputsByNodeName("Softmax-65");
  // This output node has only one output tensor.
  auto out_tensor = output_vec.front();
  if (out_tensor == nullptr) {
    std::cerr << "Output tensor is nullptr" << std::endl;
    return;
  }
  std::cout << "tensor size is:" << out_tensor.DataSize() << " tensor elements num is:" << out_tensor.ElementNum()
            << std::endl;
  // The model output data is float32.
  if (out_tensor.DataType() != mindspore::DataType::kNumberTypeFloat32) {
    std::cerr << "Output should be in float32" << std::endl;
    return;
  }
  auto out_data = reinterpret_cast<float *>(out_tensor.MutableData());
  if (out_data == nullptr) {
    std::cerr << "Data of out_tensor is nullptr" << std::endl;
    return;
  }
  std::cout << "output data is:";
  for (int i = 0; i < out_tensor.ElementNum() && i < 10; i++) {
    std::cout << out_data[i] << " ";
  }
  std::cout << std::endl;
}

void GetOutputByTensorName(mindspore::Model *model) {
  // GetOutputTensorNames returns the names of all output tensors of the model, in order.
  auto tensor_names = model->GetOutputTensorNames();
  for (const auto &tensor_name : tensor_names) {
    auto out_tensor = model->GetOutputByTensorName(tensor_name);
    if (out_tensor == nullptr) {
      std::cerr << "Output tensor is nullptr" << std::endl;
      return;
    }
    std::cout << "tensor size is:" << out_tensor.DataSize() << " tensor elements num is:" << out_tensor.ElementNum()
              << std::endl;
    // The model output data is float32.
    if (out_tensor.DataType() != mindspore::DataType::kNumberTypeFloat32) {
      std::cerr << "Output should be in float32" << std::endl;
      return;
    }
    auto out_data = reinterpret_cast<float *>(out_tensor.MutableData());
    if (out_data == nullptr) {
      std::cerr << "Data of out_tensor is nullptr" << std::endl;
      return;
    }
    std::cout << "output data is:";
    for (int i = 0; i < out_tensor.ElementNum() && i < 10; i++) {
      std::cout << out_data[i] << " ";
    }
    std::cout << std::endl;
  }
}

void GetOutputs(mindspore::Model *model) {
  auto out_tensors = model->GetOutputs();
  for (auto out_tensor : out_tensors) {
    std::cout << "tensor name is:" << out_tensor.Name() << " tensor size is:" << out_tensor.DataSize()
              << " tensor elements num is:" << out_tensor.ElementNum() << std::endl;
    // The model output data is float32.
    if (out_tensor.DataType() != mindspore::DataType::kNumberTypeFloat32) {
      std::cerr << "Output should be in float32" << std::endl;
      return;
    }
    auto out_data = reinterpret_cast<float *>(out_tensor.MutableData());
    if (out_data == nullptr) {
      std::cerr << "Data of out_tensor is nullptr" << std::endl;
      return;
    }
    std::cout << "output data is:";
    for (int i = 0; i < out_tensor.ElementNum() && i < 10; i++) {
      std::cout << out_data[i] << " ";
    }
    std::cout << std::endl;
  }
}

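// Create a Context with CPU device info, then create a Model and build it directly from the
// serialized MindIR buffer. Returns nullptr on failure; the caller owns the returned model.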
mindspore::Model *CreateAndBuildModel(char *model_buf, size_t model_size) {
  // Create and init context, add CPU device info
  auto context = std::make_shared<mindspore::Context>();
  if (context == nullptr) {
    std::cerr << "New context failed." << std::endl;
    return nullptr;
  }
  auto &device_list = context->MutableDeviceInfo();
  // If you need to use GPU or NPU, you can refer to CreateGPUDeviceInfo() or CreateNPUDeviceInfo().
  auto cpu_device_info = CreateCPUDeviceInfo();
  if (cpu_device_info == nullptr) {
    std::cerr << "Create CPUDeviceInfo failed." << std::endl;
    return nullptr;
  }
  device_list.push_back(cpu_device_info);

  // Create model
  auto model = new (std::nothrow) mindspore::Model();
  if (model == nullptr) {
    std::cerr << "New Model failed." << std::endl;
    return nullptr;
  }
  // Build model
  auto build_ret = model->Build(model_buf, model_size, mindspore::kMindIR, context);
  if (build_ret != mindspore::kSuccess) {
    delete model;
    std::cerr << "Build model failed." << std::endl;
    return nullptr;
  }
  return model;
}

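// Same as CreateAndBuildModel, but first loads the buffer into a Graph with Serialization::Load
// and builds the model from a GraphCell instead of directly from the buffer.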
mindspore::Model *CreateAndBuildModelComplicated(char *model_buf, size_t size) {
  // Create and init context, add CPU device info
  auto context = std::make_shared<mindspore::Context>();
  if (context == nullptr) {
    std::cerr << "New context failed." << std::endl;
    return nullptr;
  }
  auto &device_list = context->MutableDeviceInfo();
  auto cpu_device_info = CreateCPUDeviceInfo();
  if (cpu_device_info == nullptr) {
    std::cerr << "Create CPUDeviceInfo failed." << std::endl;
    return nullptr;
  }
  device_list.push_back(cpu_device_info);

  // Load graph
  mindspore::Graph graph;
  auto load_ret = mindspore::Serialization::Load(model_buf, size, mindspore::kMindIR, &graph);
  if (load_ret != mindspore::kSuccess) {
    std::cerr << "Load graph failed." << std::endl;
    return nullptr;
  }

  // Create model
  auto model = new (std::nothrow) mindspore::Model();
  if (model == nullptr) {
    std::cerr << "New Model failed." << std::endl;
    return nullptr;
  }
  // Build model
  mindspore::GraphCell graph_cell(graph);
  auto build_ret = model->Build(graph_cell, context);
  if (build_ret != mindspore::kSuccess) {
    delete model;
    std::cerr << "Build model failed." << std::endl;
    return nullptr;
  }
  return model;
}

mindspore::Status ResizeInputsTensorShape(mindspore::Model *model) {
  auto inputs = model->GetInputs();
  std::vector<int64_t> resize_shape = {1, 128, 128, 3};
  // Assume the model has only one input; resize its shape to [1, 128, 128, 3].
  std::vector<std::vector<int64_t>> new_shapes;
  new_shapes.push_back(resize_shape);
  return model->Resize(inputs, new_shapes);
}

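// Option 0: basic inference. Read the model file, build the model on the CPU, fill the
// input with random data, run Predict once, and print the first few output values.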
int Run(const char *model_path) {
  // Read model file.
  size_t size = 0;
  char *model_buf = ReadFile(model_path, &size);
  if (model_buf == nullptr) {
    std::cerr << "Read model file failed." << std::endl;
    return -1;
  }

  // Create and Build MindSpore model.
  auto model = CreateAndBuildModel(model_buf, size);
  delete[](model_buf);
  if (model == nullptr) {
    std::cerr << "Create and build model failed." << std::endl;
    return -1;
  }

  // Set inputs data.
  // You can also get the inputs through other methods; see GetInputsAndSetData().
  auto generate_input_ret = GetInputsByTensorNameAndSetData(model);
  if (generate_input_ret != mindspore::kSuccess) {
    delete model;
    std::cerr << "Set input data error " << generate_input_ret << std::endl;
    return -1;
  }

  auto inputs = model->GetInputs();
  auto outputs = model->GetOutputs();
  auto predict_ret = model->Predict(inputs, &outputs);
  if (predict_ret != mindspore::kSuccess) {
    delete model;
    std::cerr << "Predict error " << predict_ret << std::endl;
    return -1;
  }

  // Get outputs data.
  // You can also get the outputs through other methods; see GetOutputByTensorName() or GetOutputs().
  GetOutputsByNodeName(model);

  // Delete model.
  delete model;
  return 0;
}

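// Option 1: inference with Resize. Identical to Run(), except the input tensor shape is
// resized to [1, 128, 128, 3] before the input data is set.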
int RunResize(const char *model_path) {
  size_t size = 0;
  char *model_buf = ReadFile(model_path, &size);
  if (model_buf == nullptr) {
    std::cerr << "Read model file failed." << std::endl;
    return -1;
  }

  // Create and Build MindSpore model.
  auto model = CreateAndBuildModel(model_buf, size);
  delete[](model_buf);
  if (model == nullptr) {
    std::cerr << "Create and build model failed." << std::endl;
    return -1;
  }

  // Resize inputs tensor shape.
  auto resize_ret = ResizeInputsTensorShape(model);
  if (resize_ret != mindspore::kSuccess) {
    delete model;
    std::cerr << "Resize input tensor shape error " << resize_ret << std::endl;
    return -1;
  }

  // Set inputs data.
  // You can also get the inputs through other methods; see GetInputsAndSetData().
  auto generate_input_ret = GetInputsByTensorNameAndSetData(model);
  if (generate_input_ret != mindspore::kSuccess) {
    delete model;
    std::cerr << "Set input data error " << generate_input_ret << std::endl;
    return -1;
  }

  auto inputs = model->GetInputs();
  auto outputs = model->GetOutputs();
  auto predict_ret = model->Predict(inputs, &outputs);
  if (predict_ret != mindspore::kSuccess) {
    delete model;
    std::cerr << "Predict error " << predict_ret << std::endl;
    return -1;
  }

  // Get outputs data.
  // You can also get the outputs through other methods; see GetOutputByTensorName() or GetOutputs().
  GetOutputsByNodeName(model);

  // Delete model.
  delete model;
  return 0;
}

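// Option 2: inference using the "complicated" build path (Serialization::Load + GraphCell);
// otherwise the same flow as Run().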
int RunCreateModelComplicated(const char *model_path) {
  size_t size = 0;
  char *model_buf = ReadFile(model_path, &size);
  if (model_buf == nullptr) {
    std::cerr << "Read model file failed." << std::endl;
    return -1;
  }

  // Create and Build MindSpore model.
  auto model = CreateAndBuildModelComplicated(model_buf, size);
  delete[](model_buf);
  if (model == nullptr) {
    std::cerr << "Create and build model failed." << std::endl;
    return -1;
  }

  // Set inputs data.
  // You can also get the inputs through other methods; see GetInputsAndSetData().
  auto generate_input_ret = GetInputsByTensorNameAndSetData(model);
  if (generate_input_ret != mindspore::kSuccess) {
    delete model;
    std::cerr << "Set input data error " << generate_input_ret << std::endl;
    return -1;
  }

  auto inputs = model->GetInputs();
  auto outputs = model->GetOutputs();
  auto predict_ret = model->Predict(inputs, &outputs);
  if (predict_ret != mindspore::kSuccess) {
    delete model;
    std::cerr << "Predict error " << predict_ret << std::endl;
    return -1;
  }

  // Get outputs data.
  // You can also get the outputs through other methods; see GetOutputByTensorName() or GetOutputs().
  GetOutputsByNodeName(model);

  // Delete model.
  delete model;
  return 0;
}

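// Option 3: parallel inference. Two independent models are built from the same buffer and
// each thread drives its own Model instance, so the two Predict calls can run concurrently.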
int RunModelParallel(const char *model_path) {
  size_t size = 0;
  char *model_buf = ReadFile(model_path, &size);
  if (model_buf == nullptr) {
    std::cerr << "Read model file failed." << std::endl;
    return -1;
  }

  // Create and Build MindSpore model.
  auto model1 = CreateAndBuildModel(model_buf, size);
  auto model2 = CreateAndBuildModel(model_buf, size);
  delete[](model_buf);
  if (model1 == nullptr || model2 == nullptr) {
    std::cerr << "Create and build model failed." << std::endl;
    return -1;
  }

  std::thread thread1([&]() {
    auto generate_input_ret = GetInputsByTensorNameAndSetData(model1);
    if (generate_input_ret != mindspore::kSuccess) {
      std::cerr << "Model1 set input data error " << generate_input_ret << std::endl;
      return -1;
    }

    auto inputs = model1->GetInputs();
    auto outputs = model1->GetOutputs();
    auto predict_ret = model1->Predict(inputs, &outputs);
    if (predict_ret != mindspore::kSuccess) {
      std::cerr << "Model1 predict error " << predict_ret << std::endl;
      return -1;
    }
    std::cout << "Model1 predict success" << std::endl;
    return 0;
  });

  std::thread thread2([&]() {
    auto generate_input_ret = GetInputsByTensorNameAndSetData(model2);
    if (generate_input_ret != mindspore::kSuccess) {
      std::cerr << "Model2 set input data error " << generate_input_ret << std::endl;
      return -1;
    }

    auto inputs = model2->GetInputs();
    auto outputs = model2->GetOutputs();
    auto predict_ret = model2->Predict(inputs, &outputs);
    if (predict_ret != mindspore::kSuccess) {
      std::cerr << "Model2 predict error " << predict_ret << std::endl;
      return -1;
    }
    std::cout << "Model2 predict success" << std::endl;
    return 0;
  });

  thread1.join();
  thread2.join();

  // Get outputs data.
  // You can also get the outputs through other methods; see GetOutputByTensorName() or GetOutputs().
  GetOutputsByNodeName(model1);
  GetOutputsByNodeName(model2);

  // Delete model.
  delete model1;
  delete model2;
  return 0;
}

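// Option 4: shared memory pool. Build two models whose CPU device infos share one allocator
// (device_info2 reuses device_info1's allocator), so both models draw from the same memory pool.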
int RunWithSharedMemoryPool(const char *model_path) {
  size_t size = 0;
  char *model_buf = ReadFile(model_path, &size);
  if (model_buf == nullptr) {
    std::cerr << "Read model file failed." << std::endl;
    return -1;
  }

  auto context1 = std::make_shared<mindspore::Context>();
  if (context1 == nullptr) {
    delete[](model_buf);
    std::cerr << "New context failed." << std::endl;
    return -1;
  }
  auto &device_list1 = context1->MutableDeviceInfo();
  auto device_info1 = CreateCPUDeviceInfo();
  if (device_info1 == nullptr) {
    delete[](model_buf);
    std::cerr << "Create CPUDeviceInfo failed." << std::endl;
    return -1;
  }
  device_list1.push_back(device_info1);

  auto model1 = new (std::nothrow) mindspore::Model();
  if (model1 == nullptr) {
    delete[](model_buf);
    std::cerr << "New Model failed." << std::endl;
    return -1;
  }
  auto build_ret = model1->Build(model_buf, size, mindspore::kMindIR, context1);
  if (build_ret != mindspore::kSuccess) {
    delete[](model_buf);
    delete model1;
    std::cerr << "Build model failed." << std::endl;
    return -1;
  }

  auto context2 = std::make_shared<mindspore::Context>();
  if (context2 == nullptr) {
    delete[](model_buf);
    delete model1;
    std::cerr << "New context failed." << std::endl;
    return -1;
  }
  auto &device_list2 = context2->MutableDeviceInfo();
  auto device_info2 = CreateCPUDeviceInfo();
  if (device_info2 == nullptr) {
    delete[](model_buf);
    delete model1;
    std::cerr << "Create CPUDeviceInfo failed." << std::endl;
    return -1;
  }
  // Use the same allocator to share the memory pool.
  device_info2->SetAllocator(device_info1->GetAllocator());
  device_list2.push_back(device_info2);

  auto model2 = new (std::nothrow) mindspore::Model();
  if (model2 == nullptr) {
    delete[](model_buf);
    delete model1;
    std::cerr << "New Model failed." << std::endl;
    return -1;
  }
  build_ret = model2->Build(model_buf, size, mindspore::kMindIR, context2);
  delete[](model_buf);
  if (build_ret != mindspore::kSuccess) {
    delete model1;
    delete model2;
    std::cerr << "Build model failed." << std::endl;
    return -1;
  }

  // Set inputs data.
  // You can also get the inputs through other methods; see GetInputsAndSetData().
  GetInputsByTensorNameAndSetData(model1);
  GetInputsByTensorNameAndSetData(model2);

  auto inputs1 = model1->GetInputs();
  auto outputs1 = model1->GetOutputs();
  auto predict_ret = model1->Predict(inputs1, &outputs1);
  if (predict_ret != mindspore::kSuccess) {
    delete model1;
    delete model2;
    std::cerr << "Inference error " << predict_ret << std::endl;
    return -1;
  }

  auto inputs2 = model2->GetInputs();
  auto outputs2 = model2->GetOutputs();
  predict_ret = model2->Predict(inputs2, &outputs2);
  if (predict_ret != mindspore::kSuccess) {
    delete model1;
    delete model2;
    std::cerr << "Inference error " << predict_ret << std::endl;
    return -1;
  }

  // Get outputs data.
  // You can also get the outputs through other methods; see GetOutputByTensorName() or GetOutputs().
  GetOutputsByNodeName(model1);
  GetOutputsByNodeName(model2);

  // Delete model.
  delete model1;
  delete model2;
  return 0;
}

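// Option 5: inference with callbacks. Register before/after callbacks with Predict so that
// each operator's name and type are printed as the model executes.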
int RunCallback(const char *model_path) {
  size_t size = 0;
  char *model_buf = ReadFile(model_path, &size);
  if (model_buf == nullptr) {
    std::cerr << "Read model file failed." << std::endl;
    return -1;
  }

  // Create and Build MindSpore model.
  auto model = CreateAndBuildModel(model_buf, size);
  delete[](model_buf);
  if (model == nullptr) {
    std::cerr << "Create and build model failed." << std::endl;
    return -1;
  }

  // Set inputs data.
  // You can also get the inputs through other methods; see GetInputsAndSetData().
  auto generate_input_ret = GetInputsByTensorNameAndSetData(model);
  if (generate_input_ret != mindspore::kSuccess) {
    delete model;
    std::cerr << "Set input data error " << generate_input_ret << std::endl;
    return -1;
  }

  // Callback invoked before each operator is executed.
  auto before_call_back = [](const std::vector<mindspore::MSTensor> &before_inputs,
                             const std::vector<mindspore::MSTensor> &before_outputs,
                             const mindspore::MSCallBackParam &call_param) {
    std::cout << "Before forwarding " << call_param.node_name_ << " " << call_param.node_type_ << std::endl;
    return true;
  };
  // Callback invoked after each operator is executed.
  auto after_call_back = [](const std::vector<mindspore::MSTensor> &after_inputs,
                            const std::vector<mindspore::MSTensor> &after_outputs,
                            const mindspore::MSCallBackParam &call_param) {
    std::cout << "After forwarding " << call_param.node_name_ << " " << call_param.node_type_ << std::endl;
    return true;
  };

  auto inputs = model->GetInputs();
  auto outputs = model->GetOutputs();
  auto predict_ret = model->Predict(inputs, &outputs, before_call_back, after_call_back);
  if (predict_ret != mindspore::kSuccess) {
    delete model;
    std::cerr << "Predict error " << predict_ret << std::endl;
    return -1;
  }

  // Get outputs data.
  // You can also get the outputs through other methods; see GetOutputByTensorName() or GetOutputs().
  GetOutputsByNodeName(model);

  // Delete model.
  delete model;
  return 0;
}

int main(int argc, const char **argv) {
  if (argc < 3) {
    std::cerr << "Usage: ./runtime_cpp model_path Option" << std::endl;
    std::cerr << "Example: ./runtime_cpp ../model/mobilenetv2.ms 0" << std::endl;
    std::cerr << "When your Option is 0, you will run MindSpore Lite predict." << std::endl;
    std::cerr << "When your Option is 1, you will run MindSpore Lite predict with resize." << std::endl;
    std::cerr << "When your Option is 2, you will run MindSpore Lite predict with complicated API." << std::endl;
    std::cerr << "When your Option is 3, you will run MindSpore Lite predict with model parallel." << std::endl;
    std::cerr << "When your Option is 4, you will run MindSpore Lite predict with shared memory pool." << std::endl;
    std::cerr << "When your Option is 5, you will run MindSpore Lite predict with callback." << std::endl;
    return -1;
  }
  std::string version = mindspore::Version();
  std::cout << "MindSpore Lite Version is " << version << std::endl;
  auto model_path = RealPath(argv[1]);
  if (model_path.empty()) {
    std::cerr << "model path " << argv[1] << " is invalid." << std::endl;
    return -1;
  }
  auto flag = argv[2];
  if (strcmp(flag, "0") == 0) {
    return Run(model_path.c_str());
  } else if (strcmp(flag, "1") == 0) {
    return RunResize(model_path.c_str());
  } else if (strcmp(flag, "2") == 0) {
    return RunCreateModelComplicated(model_path.c_str());
  } else if (strcmp(flag, "3") == 0) {
    return RunModelParallel(model_path.c_str());
  } else if (strcmp(flag, "4") == 0) {
    return RunWithSharedMemoryPool(model_path.c_str());
  } else if (strcmp(flag, "5") == 0) {
    return RunCallback(model_path.c_str());
  } else {
    std::cerr << "Unsupported Flag " << flag << std::endl;
    return -1;
  }
}