/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <memory>
#include <mutex>
#include <unordered_map>
#include <vector>
#include "include/c_api/tensor_c.h"
#include "include/api/status.h"
#include "src/tensor.h"
#include "src/litert/cxx_api/tensor/tensor_impl.h"
#ifdef SUPPORT_NNRT
#include "src/litert/delegate/nnrt/nnrt_allocator.h"
#endif

// allocator_table maps an Allocator's raw pointer to a weak_ptr of that Allocator. allocator_table_mutex guards the
// map when users build multiple models concurrently, so that concurrent reads and writes of the unordered_map do not
// crash.
static std::unordered_map<void *, std::weak_ptr<mindspore::Allocator>> allocator_table;
static std::mutex allocator_table_mutex;

// Removes entries from allocator_table. When NNRT is supported, the entry for the NNRT singleton allocator is kept
// and every other entry is erased; otherwise the whole table is cleared.
void CleanAllocatorTable() {
  std::lock_guard<std::mutex> lock(allocator_table_mutex);
#ifdef SUPPORT_NNRT
  auto nnrt_allocator = mindspore::lite::NNRTAllocator::GetInstance().get();
  for (auto it = allocator_table.begin(); it != allocator_table.end();) {
    if (it->first != nnrt_allocator) {
      it = allocator_table.erase(it);
    } else {
      ++it;
    }
  }
#else
  allocator_table.clear();
#endif
}

OH_AI_TensorHandle OH_AI_TensorCreate(const char *name, OH_AI_DataType type, const int64_t *shape, size_t shape_num,
                                      const void *data, size_t data_len) {
  if (name == nullptr || shape == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return nullptr;
  }
  if (*name == '\0') {
    MS_LOG(ERROR) << "tensor name is empty.";
    return nullptr;
  }
  std::vector<int32_t> vec_shape(shape_num);
  for (size_t i = 0; i < shape_num; i++) {
    vec_shape[i] = shape[i];
  }
  auto lite_tensor =
    mindspore::lite::Tensor::CreateTensor(name, static_cast<mindspore::TypeId>(type), vec_shape, data, data_len);
  auto lite_tensor_impl = std::make_shared<mindspore::LiteTensorImpl>(lite_tensor);
  if (lite_tensor_impl == nullptr || lite_tensor_impl->lite_tensor() == nullptr) {
    MS_LOG(ERROR) << "Failed to allocate tensor impl.";
    return nullptr;
  }
  lite_tensor_impl->set_from_session(false);
  lite_tensor_impl->set_own_data(lite_tensor_impl->lite_tensor()->own_data());
  auto impl = new (std::nothrow) mindspore::MSTensor(lite_tensor_impl);
  if (impl == nullptr) {
    MS_LOG(ERROR) << "Failed to allocate MSTensor.";
    return nullptr;
  }
  return impl;
}
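
// Illustrative usage sketch (not part of the implementation): creating a tensor from caller-provided data and
// releasing it through the C API. The handle name t and the float32 constant OH_AI_DATATYPE_NUMBERTYPE_FLOAT32 are
// assumed from the public C headers; adjust them to the types you actually use.
//
//   float data[6] = {1, 2, 3, 4, 5, 6};
//   int64_t shape[2] = {2, 3};
//   OH_AI_TensorHandle t = OH_AI_TensorCreate("input0", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, shape, 2, data,
//                                             sizeof(data));
//   if (t != NULL) {
//     /* ... use the tensor ... */
//     OH_AI_TensorDestroy(&t);  /* the handle is reset to NULL */
//   }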

void OH_AI_TensorDestroy(OH_AI_TensorHandle *tensor) {
  if (tensor == nullptr || *tensor == nullptr) {
    MS_LOG(ERROR) << "tensor is nullptr.";
    return;
  }
  auto impl = static_cast<mindspore::MSTensor *>(*tensor);
  delete impl;
  *tensor = nullptr;
}

OH_AI_TensorHandle OH_AI_TensorClone(OH_AI_TensorHandle tensor) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return nullptr;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  auto clone_impl = impl->Clone();
  if (clone_impl == nullptr) {
    MS_LOG(ERROR) << "Failed to allocate tensor impl.";
    return nullptr;
  }
  std::static_pointer_cast<mindspore::LiteTensorImpl>(clone_impl->impl())->set_own_data(false);
  clone_impl->SetTensorName(impl->Name() + "_duplicate");
  return clone_impl;
}

void OH_AI_TensorSetName(OH_AI_TensorHandle tensor, const char *name) {
  if (tensor == nullptr || name == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return;
  }
  if (*name == '\0') {
    MS_LOG(ERROR) << "tensor name is empty.";
    return;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  impl->SetTensorName(name);
}

const char *OH_AI_TensorGetName(const OH_AI_TensorHandle tensor) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return nullptr;
  }
  auto ms_tensor = static_cast<mindspore::MSTensor *>(tensor);
  return std::static_pointer_cast<mindspore::LiteTensorImpl>(ms_tensor->impl())->Name().c_str();
}

void OH_AI_TensorSetDataType(OH_AI_TensorHandle tensor, OH_AI_DataType type) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  impl->SetDataType(static_cast<mindspore::DataType>(type));
}

OH_AI_DataType OH_AI_TensorGetDataType(const OH_AI_TensorHandle tensor) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return OH_AI_DATATYPE_UNKNOWN;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  auto dtype = impl->DataType();
  return static_cast<OH_AI_DataType>(dtype);
}

void OH_AI_TensorSetShape(OH_AI_TensorHandle tensor, const int64_t *shape, size_t shape_num) {
  if (tensor == nullptr || shape == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  std::vector<int64_t> vec_shape(shape_num);
  for (size_t i = 0; i < shape_num; i++) {
    vec_shape[i] = shape[i];
  }
  impl->SetShape(vec_shape);
}

const int64_t *OH_AI_TensorGetShape(const OH_AI_TensorHandle tensor, size_t *shape_num) {
  if (tensor == nullptr || shape_num == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return nullptr;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  *shape_num = impl->Shape().size();
  return impl->Shape().data();
}
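
// Illustrative usage sketch (not part of the implementation): reading a tensor shape through the C API. The pointer
// returned by OH_AI_TensorGetShape points into the tensor's own shape storage and must not be freed by the caller;
// the handle name t is a placeholder.
//
//   size_t dim_num = 0;
//   const int64_t *dims = OH_AI_TensorGetShape(t, &dim_num);
//   for (size_t i = 0; dims != NULL && i < dim_num; i++) {
//     printf("dim[%zu] = %" PRId64 "\n", i, dims[i]);  /* PRId64 needs <inttypes.h> */
//   }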

void OH_AI_TensorSetFormat(OH_AI_TensorHandle tensor, OH_AI_Format format) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  impl->SetFormat(static_cast<mindspore::Format>(format));
}

OH_AI_Format OH_AI_TensorGetFormat(const OH_AI_TensorHandle tensor) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return OH_AI_FORMAT_NHWC;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  return static_cast<OH_AI_Format>(impl->format());
}

void OH_AI_TensorSetData(OH_AI_TensorHandle tensor, void *data) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  impl->SetData(data, true);
}

OH_AI_Status OH_AI_TensorSetUserData(OH_AI_TensorHandle tensor, void *data, size_t data_size) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return OH_AI_STATUS_LITE_NULLPTR;
  }

  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  if ((impl->DataSize() > 0) && (data_size != impl->DataSize())) {
    MS_LOG(ERROR) << "input data size does not match inner data size";
    return OH_AI_STATUS_LITE_PARAM_INVALID;
  }

  // Clearing the allocator marks the inner data as not owned by the tensor itself.
  impl->SetAllocator(nullptr);
  impl->SetData(data, false);
  return OH_AI_STATUS_SUCCESS;
}
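
// Illustrative usage sketch (not part of the implementation): binding a caller-owned buffer to a tensor. Because
// OH_AI_TensorSetUserData clears the allocator and passes own_data as false, the buffer is never freed by the tensor
// and must stay valid for as long as the tensor uses it; t is a placeholder handle.
//
//   size_t size = OH_AI_TensorGetDataSize(t);
//   void *buf = malloc(size);  /* caller-owned memory */
//   if (buf != NULL && OH_AI_TensorSetUserData(t, buf, size) == OH_AI_STATUS_SUCCESS) {
//     /* ... run inference ..., then free(buf) once the tensor no longer uses it */
//   }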

const void *OH_AI_TensorGetData(const OH_AI_TensorHandle tensor) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return nullptr;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  return impl->Data().get();
}

void *OH_AI_TensorGetMutableData(const OH_AI_TensorHandle tensor) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return nullptr;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  return impl->MutableData();
}

int64_t OH_AI_TensorGetElementNum(const OH_AI_TensorHandle tensor) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return 0;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  return impl->ElementNum();
}

size_t OH_AI_TensorGetDataSize(const OH_AI_TensorHandle tensor) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return 0;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  return impl->DataSize();
}

OH_AI_Status OH_AI_TensorSetAllocator(OH_AI_TensorHandle tensor, void *allocator) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return OH_AI_STATUS_LITE_NULLPTR;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  std::lock_guard<std::mutex> lock(allocator_table_mutex);
  if (allocator_table.count(allocator) == 0) {
    MS_LOG(ERROR) << "the input allocator does not belong to the framework";
    return OH_AI_STATUS_LITE_PARAM_INVALID;
  }
  std::static_pointer_cast<mindspore::LiteTensorImpl>(impl->impl())->set_own_data(true);
  auto allocator_ptr = allocator_table[allocator].lock();
  if (allocator_ptr != nullptr) {
    impl->SetAllocator(allocator_ptr);
  } else {
    MS_LOG(ERROR) << "get allocator shared ptr failed.";
    return OH_AI_STATUS_LITE_NULLPTR;
  }
  return OH_AI_STATUS_SUCCESS;
}

void *OH_AI_TensorGetAllocator(OH_AI_TensorHandle tensor) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return nullptr;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  std::lock_guard<std::mutex> lock(allocator_table_mutex);
  // Register the allocator in allocator_table so the returned raw pointer can later be accepted by
  // OH_AI_TensorSetAllocator.
  allocator_table[impl->allocator().get()] = impl->allocator();
  return impl->allocator().get();
}
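
// Illustrative usage sketch (not part of the implementation): sharing an allocator between two tensors. Only
// allocators previously returned by OH_AI_TensorGetAllocator (and therefore registered in allocator_table above) are
// accepted by OH_AI_TensorSetAllocator; src_tensor and dst_tensor are placeholder handles.
//
//   void *alloc = OH_AI_TensorGetAllocator(src_tensor);
//   if (alloc != NULL && OH_AI_TensorSetAllocator(dst_tensor, alloc) != OH_AI_STATUS_SUCCESS) {
//     /* handle the error: the allocator was not registered or has already been released */
//   }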