/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <mutex>
#include <unordered_map>
#include <vector>
#include "include/c_api/tensor_c.h"
#include "include/api/status.h"
#include "src/tensor.h"
#include "src/litert/cxx_api/tensor/tensor_impl.h"
#ifdef SUPPORT_NNRT
#include "src/litert/delegate/nnrt/nnrt_allocator.h"
#endif

// allocator_table maps a raw Allocator pointer to a weak_ptr of that Allocator. allocator_table_mutex guards the
// table when users build multiple models from different threads, so that concurrent reads/writes of the
// unordered_map cannot crash.
static std::unordered_map<void *, std::weak_ptr<mindspore::Allocator>> allocator_table;
static std::mutex allocator_table_mutex;

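// Drops cached allocator entries. With SUPPORT_NNRT the entry for the NNRT singleton allocator is preserved; every
// other cached entry is erased. Without NNRT support the whole table is cleared.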
void CleanAllocatorTable() {
  std::lock_guard<std::mutex> lock(allocator_table_mutex);
#ifdef SUPPORT_NNRT
  auto nnrt_allocator = mindspore::lite::NNRTAllocator::GetInstance().get();
  for (auto it = allocator_table.begin(); it != allocator_table.end();) {
    if (it->first != nnrt_allocator) {
      it = allocator_table.erase(it);
    } else {
      ++it;
    }
  }
#else
  allocator_table.clear();
#endif
}

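// Creates a standalone tensor from the given name, data type, shape and optional initial data, and returns it as an
// OH_AI_TensorHandle that must be released with OH_AI_TensorDestroy.
//
// Illustrative usage (a minimal sketch; the tensor name, shape and data type below are assumptions, not values
// required by the API):
//   int64_t shape[] = {1, 3, 224, 224};
//   OH_AI_TensorHandle t =
//     OH_AI_TensorCreate("input", OH_AI_DATATYPE_NUMBERTYPE_FLOAT32, shape, 4, nullptr, 0);
//   // ... fill the tensor via OH_AI_TensorGetMutableData(t) ...
//   OH_AI_TensorDestroy(&t);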
OH_AI_TensorHandle OH_AI_TensorCreate(const char *name, OH_AI_DataType type, const int64_t *shape, size_t shape_num,
                                      const void *data, size_t data_len) {
  if (name == nullptr || shape == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return nullptr;
  }
  std::vector<int32_t> vec_shape(shape_num);
  for (size_t i = 0; i < shape_num; i++) {
    vec_shape[i] = static_cast<int32_t>(shape[i]);
  }
  auto lite_tensor =
    mindspore::lite::Tensor::CreateTensor(name, static_cast<mindspore::TypeId>(type), vec_shape, data, data_len);
  auto lite_tensor_impl = std::make_shared<mindspore::LiteTensorImpl>(lite_tensor);
  if (lite_tensor_impl == nullptr || lite_tensor_impl->lite_tensor() == nullptr) {
    MS_LOG(ERROR) << "Failed to allocate tensor impl.";
    return nullptr;
  }
  lite_tensor_impl->set_from_session(false);
  lite_tensor_impl->set_own_data(lite_tensor_impl->lite_tensor()->own_data());
  auto impl = new (std::nothrow) mindspore::MSTensor(lite_tensor_impl);
  if (impl == nullptr) {
    MS_LOG(ERROR) << "Failed to allocate MSTensor.";
    return nullptr;
  }
  return impl;
}

void OH_AI_TensorDestroy(OH_AI_TensorHandle *tensor) {
  if (tensor == nullptr || *tensor == nullptr) {
    MS_LOG(ERROR) << "tensor is nullptr.";
    return;
  }
  auto impl = static_cast<mindspore::MSTensor *>(*tensor);
  delete impl;
  *tensor = nullptr;
}

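// Clones the given tensor. The returned handle owns its own data (own_data is set to true) and is named after the
// source tensor with a "_duplicate" suffix.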
OH_AI_TensorHandle OH_AI_TensorClone(OH_AI_TensorHandle tensor) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return nullptr;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  auto clone_impl = impl->Clone();
  if (clone_impl == nullptr) {
    MS_LOG(ERROR) << "Failed to clone tensor.";
    return nullptr;
  }
  std::static_pointer_cast<mindspore::LiteTensorImpl>(clone_impl->impl())->set_own_data(true);
  clone_impl->SetTensorName(impl->Name() + "_duplicate");
  return clone_impl;
}

void OH_AI_TensorSetName(OH_AI_TensorHandle tensor, const char *name) {
  if (tensor == nullptr || name == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  impl->SetTensorName(name);
}

const char *OH_AI_TensorGetName(const OH_AI_TensorHandle tensor) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return nullptr;
  }
  auto ms_tensor = static_cast<mindspore::MSTensor *>(tensor);
  return std::static_pointer_cast<mindspore::LiteTensorImpl>(ms_tensor->impl())->Name().c_str();
}

void OH_AI_TensorSetDataType(OH_AI_TensorHandle tensor, OH_AI_DataType type) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  impl->SetDataType(static_cast<mindspore::DataType>(type));
}

OH_AI_DataType OH_AI_TensorGetDataType(const OH_AI_TensorHandle tensor) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return OH_AI_DATATYPE_UNKNOWN;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  auto dtype = impl->DataType();
  return static_cast<OH_AI_DataType>(dtype);
}

void OH_AI_TensorSetShape(OH_AI_TensorHandle tensor, const int64_t *shape, size_t shape_num) {
  if (tensor == nullptr || shape == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  std::vector<int64_t> vec_shape(shape_num);
  for (size_t i = 0; i < shape_num; i++) {
    vec_shape[i] = shape[i];
  }
  impl->SetShape(vec_shape);
}

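// Returns a pointer to the tensor's shape array and writes the number of dimensions into *shape_num. The returned
// pointer refers to memory owned by the tensor and stays valid only as long as the tensor and its shape do.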
const int64_t *OH_AI_TensorGetShape(const OH_AI_TensorHandle tensor, size_t *shape_num) {
  if (tensor == nullptr || shape_num == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return nullptr;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  *shape_num = impl->Shape().size();
  return impl->Shape().data();
}

void OH_AI_TensorSetFormat(OH_AI_TensorHandle tensor, OH_AI_Format format) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  impl->SetFormat(static_cast<mindspore::Format>(format));
}

OH_AI_Format OH_AI_TensorGetFormat(const OH_AI_TensorHandle tensor) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return OH_AI_FORMAT_NHWC;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  return static_cast<OH_AI_Format>(impl->format());
}

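// Hands a data buffer to the tensor. SetData is called with own_data = true, so the tensor takes ownership of the
// buffer and the caller must not free it. For a caller-owned buffer, use OH_AI_TensorSetUserData instead.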
void OH_AI_TensorSetData(OH_AI_TensorHandle tensor, void *data) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  impl->SetData(data, true);
}

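// Binds a caller-owned buffer to the tensor without transferring ownership. The buffer size must match the tensor's
// current data size (when that size is non-zero), and the caller must keep the buffer valid while the tensor uses it.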
OH_AI_Status OH_AI_TensorSetUserData(OH_AI_TensorHandle tensor, void *data, size_t data_size) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return OH_AI_STATUS_LITE_NULLPTR;
  }

  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  if ((impl->DataSize() > 0) && (data_size != impl->DataSize())) {
    MS_LOG(ERROR) << "input data size does not match inner data size";
    return OH_AI_STATUS_LITE_PARAM_INVALID;
  }

  // Clearing the allocator and passing own_data = false marks the inner data as not owned by the tensor itself.
  impl->SetAllocator(nullptr);
  impl->SetData(data, false);
  return OH_AI_STATUS_SUCCESS;
}

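// Read-only access to the tensor's data. For writable access use OH_AI_TensorGetMutableData, which may allocate the
// backing buffer on first use if the tensor does not have one yet.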
const void *OH_AI_TensorGetData(const OH_AI_TensorHandle tensor) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return nullptr;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  return impl->Data().get();
}

void *OH_AI_TensorGetMutableData(const OH_AI_TensorHandle tensor) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return nullptr;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  return impl->MutableData();
}

int64_t OH_AI_TensorGetElementNum(const OH_AI_TensorHandle tensor) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return 0;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  return impl->ElementNum();
}

size_t OH_AI_TensorGetDataSize(const OH_AI_TensorHandle tensor) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return 0;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  return impl->DataSize();
}

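// Assigns an allocator to the tensor. Only pointers already registered in allocator_table are accepted, i.e.
// pointers previously returned by OH_AI_TensorGetAllocator; any other pointer is rejected as invalid. On success the
// tensor is also marked as owning its data.
//
// Illustrative usage (a minimal sketch; src and dst are assumed to be valid tensor handles):
//   void *alloc = OH_AI_TensorGetAllocator(src);   // registers src's allocator in allocator_table
//   if (alloc != nullptr) {
//     (void)OH_AI_TensorSetAllocator(dst, alloc);  // dst now shares the same allocator
//   }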
OH_AI_Status OH_AI_TensorSetAllocator(OH_AI_TensorHandle tensor, void *allocator) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return OH_AI_STATUS_LITE_NULLPTR;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  std::lock_guard<std::mutex> lock(allocator_table_mutex);
  if (allocator_table.count(allocator) == 0) {
    MS_LOG(ERROR) << "the input allocator does not belong to the framework";
    return OH_AI_STATUS_LITE_PARAM_INVALID;
  }
  auto allocator_ptr = allocator_table[allocator].lock();
  if (allocator_ptr != nullptr) {
    impl->SetAllocator(allocator_ptr);
  } else {
    MS_LOG(ERROR) << "get allocator shared ptr failed.";
    return OH_AI_STATUS_LITE_NULLPTR;
  }
  std::static_pointer_cast<mindspore::LiteTensorImpl>(impl->impl())->set_own_data(true);
  return OH_AI_STATUS_SUCCESS;
}

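// Returns the tensor's allocator as an opaque pointer and registers it in allocator_table so that it can later be
// passed to OH_AI_TensorSetAllocator. Returns nullptr when the tensor has no allocator.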
void *OH_AI_TensorGetAllocator(OH_AI_TensorHandle tensor) {
  if (tensor == nullptr) {
    MS_LOG(ERROR) << "param is nullptr.";
    return nullptr;
  }
  auto impl = static_cast<mindspore::MSTensor *>(tensor);
  std::lock_guard<std::mutex> lock(allocator_table_mutex);
  allocator_table[impl->allocator().get()] = impl->allocator();
  return impl->allocator().get();
}