/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "log.h"
#include "mindir.h"
#include "utils.h"
#include "securec.h"
#include "mindir_memory_manager.h"

namespace mindspore {
namespace lite {
// ********** Tensor **********
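// Note: schema::Tensor is an immutable FlatBuffers table, so the setters below do not
// modify fields in place. Each setter rebuilds the whole tensor with a FlatBufferBuilder,
// copying the unchanged fields from the old buffer, obtains a new buffer from
// MindIRMemoryManager::CreateTensorFromBuilder (which also receives the old pointer),
// and repoints the caller's handle at the rebuilt root table.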
TensorPtr MindIR_Tensor_Create() {
  flatbuffers::FlatBufferBuilder fbb;
  std::vector<int32_t> dims(1, 0);
  std::vector<uint8_t> data(1, 0);
  std::vector<QuantParam> quant_params(1, {0, 0, 8});
  std::string name = " ";
  auto ops_offset = schema::CreateTensor(fbb, 0, DataType::DATA_TYPE_INT32, 0, schema::Format::Format_NCHW, 0, 0, 0, 0,
                                         0, fbb.CreateString(name.c_str(), name.size()));
  fbb.Finish(ops_offset);
  auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, nullptr);
  auto ret_value = flatbuffers::GetMutableRoot<schema::Tensor>(new_addr);
  return ret_value;
}

TensorPtr MindIR_Tensor_Create(const char *name, DataType data_type, const int32_t *dims, uint32_t dims_size,
                               Format format, const uint8_t *data, uint32_t data_size,
                               const QuantParam *quant_params, uint32_t quant_params_size) {
  flatbuffers::FlatBufferBuilder fbb;

  auto ops_offset =
    schema::CreateTensor(fbb, 0, data_type, fbb.CreateVector(dims, dims_size),
                         static_cast<schema::Format>(format), 0, 0, fbb.CreateVector(data, data_size),
                         ConvertQuantParams(fbb, quant_params, quant_params_size), 0,
                         fbb.CreateString(name, strlen(name)));
  fbb.Finish(ops_offset);
  auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, nullptr);
  auto ret_value = flatbuffers::GetMutableRoot<schema::Tensor>(new_addr);
  return ret_value;
}

std::string MindIR_Tensor_GetName(ConstTensorPtr tensor) {
  if (tensor != nullptr) {
    auto value = static_cast<const schema::Tensor *>(tensor);
    if (value != nullptr && value->name() != nullptr) {
      return value->name()->str();
    } else {
      return "";
    }
  } else {
    return "";
  }
}

void MindIR_Tensor_SetName(TensorPtr *tensor, const std::string &name) {
  if (tensor != nullptr && *tensor != nullptr) {
    auto value = static_cast<schema::Tensor *>(*tensor);
    if (value != nullptr) {
      flatbuffers::FlatBufferBuilder fbb;
      flatbuffers::Offset<flatbuffers::Vector<int32_t>> dims;
      if (value->dims() == nullptr || value->dims()->size() <= 0) {
        dims = 0;
      } else {
        dims = fbb.CreateVector(value->dims()->data(), value->dims()->size());
      }
      flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data;
      if (value->data() == nullptr || value->data()->size() <= 0) {
        data = 0;
      } else {
        data = fbb.CreateVector(value->data()->data(), value->data()->size());
      }
      auto ops_offset = schema::CreateTensor(
        fbb, 0, value->dataType(), dims, static_cast<schema::Format>(value->format()), 0, 0, data,
        ConvertQuantParams(fbb, value->quantParams()), 0, fbb.CreateString(name.c_str(), name.size()));
      fbb.Finish(ops_offset);
      auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, value);
      auto ret_value = flatbuffers::GetMutableRoot<schema::Tensor>(new_addr);
      *tensor = ret_value;
    }
  }
}
DataType MindIR_Tensor_GetDataType(ConstTensorPtr tensor) {
  if (tensor != nullptr) {
    auto value = static_cast<const schema::Tensor *>(tensor);
    if (value != nullptr) {
      return static_cast<DataType>(value->dataType());
    } else {
      DataType en = DATA_TYPE_INT32;
      return en;
    }
  } else {
    DataType en = DATA_TYPE_INT32;
    return en;
  }
}

void MindIR_Tensor_SetDataType(TensorPtr *tensor, DataType data_type) {
  if (tensor != nullptr && *tensor != nullptr) {
    auto value = static_cast<schema::Tensor *>(*tensor);
    if (value != nullptr) {
      flatbuffers::FlatBufferBuilder fbb;
      flatbuffers::Offset<flatbuffers::Vector<int32_t>> dims;
      if (value->dims() == nullptr || value->dims()->size() <= 0) {
        dims = 0;
      } else {
        dims = fbb.CreateVector(value->dims()->data(), value->dims()->size());
      }
      flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data;
      if (value->data() == nullptr || value->data()->size() <= 0) {
        data = 0;
      } else {
        data = fbb.CreateVector(value->data()->data(), value->data()->size());
      }
      flatbuffers::Offset<flatbuffers::String> name;
      if (value->name() == nullptr || value->name()->size() <= 0) {
        name = 0;
      } else {
        name = fbb.CreateString(value->name()->c_str(), value->name()->size());
      }
      auto ops_offset =
        schema::CreateTensor(fbb, 0, data_type, dims, static_cast<schema::Format>(value->format()), 0, 0, data,
                             ConvertQuantParams(fbb, value->quantParams()), 0, name);
      fbb.Finish(ops_offset);
      auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, value);
      auto ret_value = flatbuffers::GetMutableRoot<schema::Tensor>(new_addr);
      *tensor = ret_value;
    }
  }
}

std::vector<int32_t> MindIR_Tensor_GetDims(ConstTensorPtr tensor) {
  if (tensor != nullptr) {
    auto value = static_cast<const schema::Tensor *>(tensor);
    if (value != nullptr) {
      std::vector<int32_t> result;
      auto src = value->dims();
      if (src == nullptr) {
        return {};
      }
      result.resize(src->size());
      std::transform(src->begin(), src->end(), result.begin(), [](int32_t item) { return item; });
      return result;
    } else {
      return {};
    }
  } else {
    return {};
  }
}

void MindIR_Tensor_SetDims(TensorPtr *tensor, const std::vector<int32_t> &dims) {
  if (tensor != nullptr && *tensor != nullptr) {
    auto value = static_cast<schema::Tensor *>(*tensor);
    if (value != nullptr) {
      flatbuffers::FlatBufferBuilder fbb;
      flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data;
      if (value->data() == nullptr || value->data()->size() <= 0) {
        data = 0;
      } else {
        data = fbb.CreateVector(value->data()->data(), value->data()->size());
      }
      flatbuffers::Offset<flatbuffers::String> name;
      if (value->name() == nullptr || value->name()->size() <= 0) {
        name = 0;
      } else {
        name = fbb.CreateString(value->name()->c_str(), value->name()->size());
      }
      auto ops_offset = schema::CreateTensor(fbb, 0, value->dataType(), fbb.CreateVector(dims.data(), dims.size()),
                                             static_cast<schema::Format>(value->format()), 0, 0, data,
                                             ConvertQuantParams(fbb, value->quantParams()), 0, name);
      fbb.Finish(ops_offset);
      auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, value);
      auto ret_value = flatbuffers::GetMutableRoot<schema::Tensor>(new_addr);
      *tensor = ret_value;
    }
  }
}
Format MindIR_Tensor_GetFormat(ConstTensorPtr tensor) {
  if (tensor != nullptr) {
    auto value = static_cast<const schema::Tensor *>(tensor);
    if (value != nullptr) {
      return static_cast<Format>(value->format());
    } else {
      Format en = FORMAT_NCHW;
      return en;
    }
  } else {
    Format en = FORMAT_NCHW;
    return en;
  }
}

void MindIR_Tensor_SetFormat(TensorPtr *tensor, Format format) {
  if (tensor != nullptr && *tensor != nullptr) {
    auto value = static_cast<schema::Tensor *>(*tensor);
    if (value != nullptr) {
      flatbuffers::FlatBufferBuilder fbb;
      flatbuffers::Offset<flatbuffers::Vector<int32_t>> dims;
      if (value->dims() == nullptr || value->dims()->size() <= 0) {
        dims = 0;
      } else {
        dims = fbb.CreateVector(value->dims()->data(), value->dims()->size());
      }
      flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data;
      if (value->data() == nullptr || value->data()->size() <= 0) {
        data = 0;
      } else {
        data = fbb.CreateVector(value->data()->data(), value->data()->size());
      }
      flatbuffers::Offset<flatbuffers::String> name;
      if (value->name() == nullptr || value->name()->size() <= 0) {
        name = 0;
      } else {
        name = fbb.CreateString(value->name()->c_str(), value->name()->size());
      }
      auto ops_offset = schema::CreateTensor(fbb, 0, value->dataType(), dims, static_cast<schema::Format>(format), 0, 0,
                                             data, ConvertQuantParams(fbb, value->quantParams()), 0, name);
      fbb.Finish(ops_offset);
      auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, value);
      auto ret_value = flatbuffers::GetMutableRoot<schema::Tensor>(new_addr);
      *tensor = ret_value;
    }
  }
}

std::vector<uint8_t> MindIR_Tensor_GetData(ConstTensorPtr tensor) {
  if (tensor != nullptr) {
    auto value = static_cast<const schema::Tensor *>(tensor);
    if (value != nullptr) {
      std::vector<uint8_t> result;
      auto src = value->data();
      if (src == nullptr) {
        return {};
      }
      result.resize(src->size());
      std::transform(src->begin(), src->end(), result.begin(), [](uint8_t item) { return item; });
      return result;
    } else {
      return {};
    }
  } else {
    return {};
  }
}

void MindIR_Tensor_SetData(TensorPtr *tensor, const std::vector<uint8_t> &data) {
  if (tensor != nullptr && *tensor != nullptr) {
    auto value = static_cast<schema::Tensor *>(*tensor);
    if (value != nullptr) {
      flatbuffers::FlatBufferBuilder fbb;
      flatbuffers::Offset<flatbuffers::Vector<int32_t>> dims;
      if (value->dims() == nullptr || value->dims()->size() <= 0) {
        dims = 0;
      } else {
        dims = fbb.CreateVector(value->dims()->data(), value->dims()->size());
      }
      flatbuffers::Offset<flatbuffers::String> name;
      if (value->name() == nullptr || value->name()->size() <= 0) {
        name = 0;
      } else {
        name = fbb.CreateString(value->name()->c_str(), value->name()->size());
      }
      auto ops_offset = schema::CreateTensor(
        fbb, 0, value->dataType(), dims, static_cast<schema::Format>(value->format()), 0, 0,
        fbb.CreateVector(data.data(), data.size()), ConvertQuantParams(fbb, value->quantParams()), 0, name);
      fbb.Finish(ops_offset);
      auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, value);
      auto ret_value = flatbuffers::GetMutableRoot<schema::Tensor>(new_addr);
      *tensor = ret_value;
    }
  }
}
std::vector<QuantParam> MindIR_Tensor_GetQuantParams(ConstTensorPtr tensor) {
  if (tensor != nullptr) {
    auto value = static_cast<const schema::Tensor *>(tensor);
    if (value != nullptr) {
      std::vector<QuantParam> result;
      auto src = value->quantParams();
      if (src == nullptr) {
        return {};
      }
      size_t size = src->size();
      result.reserve(size);
      for (size_t i = 0; i < size; i++) {
        auto tmp = src->Get(i);
        QuantParam q{tmp->zeroPoint(), tmp->scale(), tmp->numBits()};
        result.emplace_back(q);
      }
      return result;
    } else {
      return {};
    }
  } else {
    return {};
  }
}

void MindIR_Tensor_SetQuantParams(TensorPtr *tensor, const std::vector<QuantParam> &quant_params) {
  if (tensor != nullptr && *tensor != nullptr) {
    auto value = static_cast<schema::Tensor *>(*tensor);
    if (value != nullptr) {
      flatbuffers::FlatBufferBuilder fbb;
      flatbuffers::Offset<flatbuffers::Vector<int32_t>> dims;
      if (value->dims() == nullptr || value->dims()->size() <= 0) {
        dims = 0;
      } else {
        dims = fbb.CreateVector(value->dims()->data(), value->dims()->size());
      }
      flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data;
      if (value->data() == nullptr || value->data()->size() <= 0) {
        data = 0;
      } else {
        data = fbb.CreateVector(value->data()->data(), value->data()->size());
      }
      flatbuffers::Offset<flatbuffers::String> name;
      if (value->name() == nullptr || value->name()->size() <= 0) {
        name = 0;
      } else {
        name = fbb.CreateString(value->name()->c_str(), value->name()->size());
      }
      auto ops_offset =
        schema::CreateTensor(fbb, 0, value->dataType(), dims, static_cast<schema::Format>(value->format()), 0, 0, data,
                             ConvertQuantParams(fbb, quant_params.data(), quant_params.size()), 0, name);
      fbb.Finish(ops_offset);
      auto new_addr = MindIRMemoryManager::GetInstance()->CreateTensorFromBuilder(fbb, value);
      auto ret_value = flatbuffers::GetMutableRoot<schema::Tensor>(new_addr);
      *tensor = ret_value;
    }
  }
}

int32_t MindIR_Tensor_GetNodeType(ConstTensorPtr tensor) {
  if (tensor != nullptr) {
    auto value = static_cast<const schema::Tensor *>(tensor);
    if (value != nullptr) {
      return value->nodeType();
    } else {
      return 0;
    }
  } else {
    return 0;
  }
}

void MindIR_Tensor_Destroy(TensorPtr *tensor) {
  if (tensor != nullptr && *tensor != nullptr) {
    auto schema = static_cast<schema::Tensor *>(*tensor);
    MindIRMemoryManager::GetInstance()->DeleteTensor(schema);
    *tensor = nullptr;
  }
}
}  // namespace lite
}  // namespace mindspore
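
// Illustrative usage sketch (not part of this file). The enum spellings DATA_TYPE_INT32 /
// FORMAT_NCHW and the QuantParam field order {zeroPoint, scale, numBits} come from the code
// above; everything else (values, names) is an assumption for illustration only:
//
//   const std::vector<int32_t> dims = {2, 2};
//   const std::vector<uint8_t> raw(4 * sizeof(int32_t), 0);
//   QuantParam qp{0, 1.0, 8};
//   TensorPtr t = MindIR_Tensor_Create("weights", DATA_TYPE_INT32, dims.data(),
//                                      static_cast<uint32_t>(dims.size()), FORMAT_NCHW,
//                                      raw.data(), static_cast<uint32_t>(raw.size()), &qp, 1);
//   MindIR_Tensor_SetName(&t, "renamed_weights");  // rebuilds the buffer and updates t
//   MindIR_Tensor_Destroy(&t);                     // releases the buffer; t becomes nullptr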