/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "coder/utils/coder_utils.h"
#include <set>
#include <queue>
#include <string>
#include <memory>
#include <fstream>
#include <vector>    // std::vector is used directly in the function signatures below
#include <typeinfo>  // typeid comparison in TensorDataToFile
#include "coder/log.h"
#include "coder/utils/type_cast.h"
#include "coder/allocator/allocator.h"

namespace mindspore::lite::micro {
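// Returns true when the tensor holds constant data, i.e. a const tensor or a const scalar.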
bool CheckConstantTensor(const Tensor *const tensor) {
  return tensor->category() == Tensor::Category::CONST_TENSOR || tensor->category() == Tensor::Category::CONST_SCALAR;
}

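// Dumps the tensor's elements to `ofs` as a brace-enclosed C array literal,
// NUM values per line; float data keeps kWeightPrecision digits of precision.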
template <typename T>
void TensorDataToFile(const lite::Tensor *tensor, std::ofstream &ofs) {
  const int NUM = 45;
  T *data = reinterpret_cast<T *>(tensor->data());
  if (data == nullptr) {
    MS_LOG(ERROR) << "data is nullptr";
    return;
  }
  ofs << "{\n";
  if (typeid(T) == typeid(float)) {
    ofs.precision(kWeightPrecision);
  }
  int len = tensor->ElementsNum();
  for (int i = 0; i < len; ++i) {
    ofs << std::to_string(data[i]) << ", ";
    if (i % NUM == NUM - 1) {
      ofs << "\n";
    }
  }
  ofs << "\n};\n\n";
}

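// Dispatches on the tensor's data type and writes its contents with TensorDataToFile.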
void PrintTensorData(const lite::Tensor *tensor, std::ofstream &ofs) {
  TypeId type = tensor->data_type();
  switch (type) {
    case kNumberTypeFloat:
    case kNumberTypeFloat32:
      TensorDataToFile<float>(tensor, ofs);
      break;
    case kNumberTypeInt8:
      TensorDataToFile<int8_t>(tensor, ofs);
      break;
    case kNumberTypeInt:
    case kNumberTypeInt32:
      TensorDataToFile<int32_t>(tensor, ofs);
      break;
    case kNumberTypeInt64:
      TensorDataToFile<int64_t>(tensor, ofs);
      break;
    case kNumberTypeUInt8:
      TensorDataToFile<uint8_t>(tensor, ofs);
      break;
    case kNumberTypeUInt32:
      TensorDataToFile<uint32_t>(tensor, ofs);
      break;
    default:
      MS_LOG(ERROR) << "unsupported data type: " << EnumNameDataType(type);
      break;
  }
}

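// Generates C code that wraps every non-constant tensor in a MicroTensor and prints it
// to `output_file`; `is_input` labels the tensor as "input" or "output" in the dump.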
std::string TensorsToString(const std::vector<Tensor *> &tensors, const std::string &is_input) {
  MemoryAllocator *allocator = MemoryAllocator::GetInstance();
  std::string info;
  for (const auto &tensor : tensors) {
    if (CheckConstantTensor(tensor)) {
      continue;
    }
    info += "      {\n";
    info += "      int dim[] = " + ArrayToString(tensor->shape()) + ";\n";
    info += "      MicroTensor tensor = {";
    info += EnumMicroTensorDataType(tensor->data_type()) + ", ";
    info += EnumMicroTensorFormat(tensor->format()) + ", ";
    info += std::to_string(tensor->shape().size()) + ", dim, ";
    info += allocator->GetRuntimeAddr(tensor) + "};\n";
    info += "      fprintf(output_file, \"" + is_input + " Tensor: " + allocator->GetRuntimeAddr(tensor) + "\\n\");\n";
    info += "      PrintTensor(&tensor, output_file, \"" + is_input + "\");\n";
    info += "      }\n";
  }
  return info;
}

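// Appends a dump snippet to each operator's generated code block: at runtime the snippet
// writes the node's input and output tensors to a "<node name>.ir" file.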
std::vector<std::string> AddDumpDataInfo(const std::vector<std::string> &blocks,
                                         const std::vector<std::unique_ptr<OperatorCoder>> &opcoders) {
  std::vector<std::string> results;
  if (blocks.size() != opcoders.size()) {
    MS_LOG(ERROR) << "error, coder blocks size is not equal to opcoders size";
    return results;
  }
  size_t num = opcoders.size();
  for (size_t i = 0; i < num; ++i) {
    auto &opcoder = opcoders.at(i);
    std::string code = blocks.at(i);
    std::string name = opcoder->name();
    code += "    {\n";
    code += "      FILE *output_file = fopen(\"./" + name + ".ir\", \"w\");\n";
    code += "      fprintf(output_file, \"Node:" + name + "\\n\");\n";
    code += TensorsToString(opcoder->input_tensors(), "input");
    code += TensorsToString(opcoder->output_tensors(), "output");
    code += "      fclose(output_file);\n";
    code += "    }\n";
    results.emplace_back(code);
  }
  return results;
}

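// Splits `str` on `pattern` and returns the pieces, e.g.
// SplitString("conv;relu;pool", ";") returns {"conv", "relu", "pool"}.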
std::vector<std::string> SplitString(std::string str, const std::string &pattern) {
  std::vector<std::string> results;
  if (str.empty()) {
    MS_LOG(ERROR) << "source string is empty";
    return results;
  }
  if (pattern.empty()) {
    // an empty pattern would never advance the loop below
    results.push_back(str);
    return results;
  }
  str += pattern;
  while (!str.empty()) {
    size_t pos = str.find(pattern);
    std::string sub_string = str.substr(0, pos);
    results.push_back(sub_string);
    // advance past the whole pattern, not just one character, so multi-character patterns split correctly
    str = str.substr(pos + pattern.size());
  }
  return results;
}
}  // namespace mindspore::lite::micro