• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * Copyright 2021-2022 Huawei Technologies Co., Ltd
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
#include <fstream>
#include <memory>
#include <new>
#include <string>
#include <vector>

#include "extendrt/mindir_loader/mindir_model/mindir_model.h"
#include "utils/ms_utils_secure.h"
#include "extendrt/mindir_loader/mindir_model/mindir_model_util.h"
#include "extendrt/mindir_loader/mindir_model/kernel_mod_util.h"
#include "src/executor/kernel_exec.h"
#include "extendrt/mindir_loader/mindir_model/inner_kernel.h"
#include "extendrt/mock/lite_runtime/populate/base_operator_populate_register.h"

#include "src/litert/kernel_registry.h"
31 
32 namespace mindspore::infer::mindir {
33 #define IS_LITTLE_ENDIAN (uint8_t)1U
34 
// Model verification is currently a no-op stub that accepts every model.
// NOTE(review): presumably structural/integrity checks are intended here —
// confirm whether callers rely on real verification.
bool MindirModel::ModelVerify() const { return true; }
36 
ConvertTensors(std::vector<mindspore::lite::Tensor * > * lite_tensors)37 int MindirModel::ConvertTensors(std::vector<mindspore::lite::Tensor *> *lite_tensors) {
38   if (lite_tensors == nullptr) {
39     MS_LOG(ERROR) << "lite tensors is null.";
40     return mindspore::lite::RET_NULL_PTR;
41   }
42 
43   uint32_t tensor_count = this->all_mindir_tensors_.size();
44   auto model_input_indices = this->graph_.input_indices_;
45   auto model_output_indices = this->graph_.output_indices_;
46 
47   for (uint32_t i = 0; i < tensor_count; ++i) {
48     auto src_tensor = this->all_mindir_tensors_[i];
49     auto *dst_tensor = ConvertTensor(src_tensor);
50     if (dst_tensor == nullptr) {
51       MS_LOG(ERROR) << "Convert new " << i << "th tensor failed!";
52       return mindspore::lite::RET_NULL_PTR;
53     }
54 
55     if (mindspore::lite::IsContain(model_input_indices, i)) {
56       dst_tensor->set_category(mindspore::lite::Category::GRAPH_INPUT);
57     }
58     if (mindspore::lite::IsContain(model_output_indices, i)) {
59       // a tensor is as both input and output, would be treated as an input.
60       if (!dst_tensor->IsGraphInput()) {
61         dst_tensor->set_category(mindspore::lite::Category::GRAPH_OUTPUT);
62       }
63     }
64 
65     auto ret = CheckTensorValid(dst_tensor);
66     if (ret != RET_OK) {
67       MS_LOG(ERROR) << "Check " << i << "th tensor failed";
68       delete dst_tensor;
69       return ret;
70     }
71 
72     lite_tensors->emplace_back(dst_tensor);
73   }
74   return mindspore::lite::RET_OK;
75 }
76 
// Returns the directory the model was loaded from; LoadTensorData() joins it
// with external-data locations to resolve tensor data files.
std::string MindirModel::GetModelPath() const { return this->model_path_; }
78 
FindBackendKernel(const std::vector<mindspore::lite::Tensor * > & in_tensors,const std::vector<mindspore::lite::Tensor * > & out_tensors,const LiteGraph::Node * node,lite::InnerContext * context,TypeId prefer_data_type)79 mindspore::kernel::KernelExec *MindirModel::FindBackendKernel(const std::vector<mindspore::lite::Tensor *> &in_tensors,
80                                                               const std::vector<mindspore::lite::Tensor *> &out_tensors,
81                                                               const LiteGraph::Node *node, lite::InnerContext *context,
82                                                               TypeId prefer_data_type) {
83   if (select_lite_kernel_) {
84     return FindLiteKernel(in_tensors, out_tensors, node, context, prefer_data_type);
85   }
86   std::shared_ptr<kernel::InnerKernel> inner_kernel =
87     mindspore::kernel::KernelModUtil::GetInnerKernel(in_tensors, out_tensors, node, context);
88   kernel::KernelExec *kernel_exec = new kernel::KernelExec(inner_kernel);
89   auto desc = kernel_exec->desc();
90   desc.data_type = in_tensors.front()->data_type();
91   kernel_exec->set_desc(desc);
92   return kernel_exec;
93 }
94 
FindLiteKernel(const std::vector<mindspore::lite::Tensor * > & in_tensors,const std::vector<mindspore::lite::Tensor * > & out_tensors,const LiteGraph::Node * node,lite::InnerContext * context,TypeId prefer_data_type)95 mindspore::kernel::KernelExec *MindirModel::FindLiteKernel(const std::vector<mindspore::lite::Tensor *> &in_tensors,
96                                                            const std::vector<mindspore::lite::Tensor *> &out_tensors,
97                                                            const LiteGraph::Node *node, lite::InnerContext *context,
98                                                            TypeId prefer_data_type) {
99   mindspore::kernel::KernelExec *kernel_exec = nullptr;
100   auto op_type_str = node->op_type_;
101   auto op_type = BaseOperatorPopulateRegistry::GetInstance()->TypeStrToType(op_type_str);
102   auto parame_gen = BaseOperatorPopulateRegistry::GetInstance()->GetParameterCreator(op_type);
103   if (parame_gen == nullptr) {
104     MS_LOG(ERROR) << "parameter generator is nullptr.";
105     return nullptr;
106   }
107   OpParameter *op_parameter = parame_gen(node->base_operator_.get());
108   kernel::KernelKey desc{kernel::KERNEL_ARCH::kCPU, kNumberTypeInt32, NHWC, op_type, "", kernel::kBuiltin};
109   auto ret = lite::KernelRegistry::GetInstance()->GetKernelExec(in_tensors, out_tensors, context, nullptr, desc,
110                                                                 op_parameter, &kernel_exec, node->primitive_);
111   if (ret != lite::RET_OK || kernel_exec == nullptr) {
112     MS_LOG(ERROR) << "find lite kernel failed with code " << ret << ", node: " << node->name_
113                   << ", type: " << node->op_type_;
114     return nullptr;
115   }
116   return kernel_exec;
117 }
118 
ConvertTensor(TensorProtoWrap mindir_tensor_wrap)119 mindspore::lite::Tensor *MindirModel::ConvertTensor(TensorProtoWrap mindir_tensor_wrap) {
120   auto mindir_tensor = mindir_tensor_wrap.tensor_proto();
121   auto data_type = MindirModelUtil::ProtoTypeToTypeId(mindir_tensor.data_type());
122   std::vector<int> shape;
123   for (int i = 0; i < mindir_tensor.dims_size(); i++) {
124     shape.push_back(mindir_tensor.dims(i));
125   }
126   auto format = Format::NCHW;
127   mindspore::lite::NodeType node_type;
128   if (mindir_tensor.has_raw_data() || mindir_tensor.has_external_data()) {
129     node_type = mindspore::lite::NodeType_ValueNode;
130   } else {
131     node_type = mindspore::lite::NodeType_CNode;
132   }
133   auto category = TensorCategory(node_type, mindir_tensor.dims_size(), data_type, mindir_tensor.raw_data().size());
134   auto *lite_tensor = new mindspore::lite::Tensor(data_type, shape, format, category);
135   lite_tensor->set_tensor_name(mindir_tensor_wrap.name());
136   if (this->LoadTensorData(lite_tensor, mindir_tensor) != RET_OK) {
137     MS_LOG(WARNING) << "MindirModel: Convert tensor failed, load tensor data failed, tensor data will be empty.";
138   }
139   return lite_tensor;
140 }
141 
LoadTensorData(mindspore::lite::Tensor * lite_tensor,const mind_ir::TensorProto & mindir_tensor)142 int MindirModel::LoadTensorData(mindspore::lite::Tensor *lite_tensor, const mind_ir::TensorProto &mindir_tensor) {
143   if (mindir_tensor.has_raw_data()) {
144     return memcpy_s(lite_tensor->MutableData(), lite_tensor->Size(), mindir_tensor.raw_data().data(),
145                     mindir_tensor.raw_data().size());
146   }
147   if (mindir_tensor.has_external_data()) {
148     std::string file = this->GetModelPath() + "/" + mindir_tensor.external_data().location();
149     // Read file
150     std::basic_ifstream<char> fid(file, std::ios::in | std::ios::binary);
151     if (!fid) {
152       MS_LOG(ERROR) << "Open file '" << file << "' failed, please check the correct of the file.";
153       return RET_OK;
154     }
155     fid.seekg(0, std::ios_base::end);
156     size_t file_size = static_cast<size_t>(fid.tellg());
157     fid.clear();
158     fid.seekg(0);
159     auto plain_data = std::make_unique<char[]>(file_size);
160     constexpr uint8_t is_little_endian = 1;
161     constexpr int byte_order_index = 0;
162     fid.read(plain_data.get(), file_size);
163     fid.close();
164     // if byte order is not same return false
165     if ((plain_data[byte_order_index] == is_little_endian) != common::IsLittleByteOrder()) {
166       MS_LOG(ERROR) << "The byte order of export MindIr device and load MindIr device is not same!";
167       return mindspore::lite::RET_ERROR;
168     }
169     const uint8_t *data = reinterpret_cast<const uint8_t *>(plain_data.get());
170     auto ret =
171       common::huge_memcpy(reinterpret_cast<uint8_t *>(lite_tensor->MutableData()), lite_tensor->Size(),
172                           data + mindir_tensor.external_data().offset(), mindir_tensor.external_data().length());
173     if (ret != 0) {
174       MS_LOG(ERROR) << "Build parameter occur memcpy_s error.";
175       return mindspore::lite::RET_OK;
176     }
177     return mindspore::lite::RET_OK;
178   }
179   return mindspore::lite::RET_NOT_SUPPORT;
180 }
181 
CheckTensorValid(lite::Tensor * dst_tensor)182 int MindirModel::CheckTensorValid(lite::Tensor *dst_tensor) {
183   MS_ASSERT(dst_tensor != nullptr);
184   if (dst_tensor->data_type() == kObjectTypeTensorType) {
185     return mindspore::lite::RET_OK;
186   }
187   if (dst_tensor->IsGraphInput() || dst_tensor->IsGraphOutput()) {
188     return mindspore::lite::RET_OK;
189   }
190   if (dst_tensor->IsConst() == false && dst_tensor->data() != nullptr) {
191     return mindspore::lite::RET_ERROR;
192   }
193   return mindspore::lite::RET_OK;
194 }
195 
Free()196 void MindirModel::Free() {
197   if (this->buf != nullptr) {
198     delete[](this->buf);
199     this->buf = nullptr;
200   }
201   auto nodes_size = this->graph_.all_nodes_.size();
202   for (size_t i = 0; i < nodes_size; ++i) {
203     auto node = this->graph_.all_nodes_[i];
204     auto *primitive_ptr = reinterpret_cast<ops::BaseOperator *>(const_cast<void *>(node->primitive_));
205     delete primitive_ptr;
206     node->primitive_ = nullptr;
207   }
208 }
209 
Destroy()210 void MindirModel::Destroy() {
211   Free();
212 
213   this->all_mindir_tensors_.clear();
214 
215   auto nodes_size = this->graph_.all_nodes_.size();
216   for (size_t i = 0; i < nodes_size; ++i) {
217     auto node = this->graph_.all_nodes_[i];
218     MS_ASSERT(node != nullptr);
219     delete node;
220   }
221   this->graph_.all_nodes_.clear();
222 
223   auto sub_graph_size = this->graph_.sub_graphs_.size();
224   for (size_t i = 0; i < sub_graph_size; ++i) {
225     auto sub_graph = this->graph_.sub_graphs_[i];
226     delete sub_graph;
227   }
228 }
229 }  // namespace mindspore::infer::mindir
230