/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/litert/cxx_api/tensor/tensor_impl.h"
#include <cstddef>
#include <numeric>
#include <memory>
#include <algorithm>
#include <string>
#include <vector>
#include <functional>
#include "src/litert/cxx_api/tensor_utils.h"
#include "src/tensor.h"
#include "src/common/string_utils.h"
#ifdef ENABLE_CLOUD_INFERENCE
#include "src/extendrt/kernel/ascend/plugin/ascend_allocator_plugin.h"
#endif
namespace mindspore {
using mindspore::lite::RET_OK;

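// Creates a LiteTensorImpl wrapping a lite::Tensor built from the given name, data type, shape, and optional
// user-provided buffer. The tensor does not take ownership of `data`, and the impl is marked as not coming
// from a session.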
std::shared_ptr<LiteTensorImpl> LiteTensorImpl::CreateTensorImpl(const std::string &name, enum DataType type,
                                                                 const std::vector<int64_t> &shape, const void *data,
                                                                 size_t data_len) {
  // Verify the data length against the shape only when a data buffer is actually provided.
  std::vector<int32_t> truncated_shape =
    TruncateShape(shape, static_cast<enum TypeId>(type), data_len, data_len != 0);
  if (truncated_shape.empty() && !(shape.empty())) {
    MS_LOG(ERROR) << "Invalid shape for creating tensor.";
    return nullptr;
  }
  auto lite_tensor = lite::Tensor::CreateTensor(name, static_cast<enum TypeId>(type), truncated_shape, data, data_len);
  if (lite_tensor == nullptr) {
    MS_LOG(ERROR) << "Failed to allocate lite tensor.";
    return nullptr;
  }
  auto impl = std::make_shared<LiteTensorImpl>(lite_tensor);
  if (impl == nullptr) {
    MS_LOG(ERROR) << "Failed to allocate tensor impl.";
    return nullptr;
  }
  impl->set_from_session(false);
  return impl;
}

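// Same as CreateTensorImpl, but deep-copies the provided buffer so the resulting tensor owns its data.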
std::shared_ptr<LiteTensorImpl> LiteTensorImpl::CreateTensorImplByDeepCopy(const std::string &name, enum DataType type,
                                                                           const std::vector<int64_t> &shape,
                                                                           const void *data, size_t data_len) {
  std::vector<int32_t> truncated_shape = TruncateShape(shape, static_cast<enum TypeId>(type), data_len, false);
  auto lite_tensor =
    lite::Tensor::CreateTensorByDeepCopy(name, static_cast<enum TypeId>(type), truncated_shape, data, data_len);
  if (lite_tensor == nullptr) {
    MS_LOG(ERROR) << "Failed to allocate lite tensor.";
    return nullptr;
  }
  auto impl = std::make_shared<LiteTensorImpl>(lite_tensor);
  if (impl == nullptr) {
    MS_LOG(ERROR) << "Failed to allocate tensor impl.";
    return nullptr;
  }
  impl->set_from_session(false);
  impl->set_own_data(true);
  return impl;
}

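// Attaches a device-side buffer to the tensor. Any device data the tensor currently owns is released first
// (cloud-inference builds only); ownership of the new buffer stays with the caller.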
void LiteTensorImpl::SetDeviceData(void *data) {
  if (lite_tensor_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor.";
    return;
  }
#ifdef ENABLE_CLOUD_INFERENCE
  if (GetDeviceData() != nullptr && own_data_) {
    MS_LOG(INFO) << "Free device data in tensor impl.";
    kernel::AscendAllocatorPlugin::GetInstance().Free(GetDeviceData(), GetDeviceId());
  }
#endif
  lite_tensor_->set_device_data(data);
  if (data != nullptr) {
    own_data_ = false;
  }
}

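// Returns the tensor's device-side buffer, or nullptr if the underlying tensor is invalid.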
void *LiteTensorImpl::GetDeviceData() {
  if (lite_tensor_ == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor.";
    return nullptr;
  }
  return lite_tensor_->device_data();
}

#ifndef STRING_KERNEL_CLIP
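// Packs a vector of strings into a newly allocated string tensor. The packed layout is written by
// lite::WriteStringsToTensor; the resulting impl owns the tensor data.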
std::shared_ptr<LiteTensorImpl> LiteTensorImpl::StringsToTensorImpl(const std::string &name,
                                                                    const std::vector<std::string> &str) {
  auto lite_tensor = new (std::nothrow) lite::Tensor();
  if (lite_tensor == nullptr) {
    MS_LOG(ERROR) << "Failed to allocate lite tensor.";
    return nullptr;
  }
  lite_tensor->set_tensor_name(name);
  std::vector<lite::StringPack> all_pack;
  for (auto &input : str) {
    lite::StringPack pack = {static_cast<int>(input.length()), input.data()};
    all_pack.push_back(pack);
  }
  auto ret = lite::WriteStringsToTensor(lite_tensor, all_pack);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Convert strings to tensor failed.";
    delete lite_tensor;
    return nullptr;
  }
  auto impl = std::make_shared<LiteTensorImpl>(lite_tensor);
  if (impl == nullptr) {
    delete lite_tensor;
    MS_LOG(ERROR) << "Failed to allocate tensor impl.";
    return nullptr;
  }
  impl->set_own_data(true);
  impl->set_from_session(false);
  return impl;
}

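// Unpacks a string tensor back into a vector of std::string. Returns an empty vector when the impl or its
// underlying tensor data is invalid.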
std::vector<std::string> LiteTensorImpl::TensorImplToStrings(const std::shared_ptr<LiteTensorImpl> &impl) {
  std::vector<std::string> empty;
  if (impl == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor impl.";
    return empty;
  }
  auto lite_tensor = impl->lite_tensor();
  if (lite_tensor == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor impl.";
    return empty;
  }
  const void *ptr = lite_tensor->data();
  if (ptr == nullptr) {
    MS_LOG(ERROR) << "Tensor data is null.";
    return empty;
  }
  std::vector<lite::StringPack> all_pack = lite::ParseStringBuffer(ptr);
  std::vector<std::string> result(all_pack.size());
  std::transform(all_pack.begin(), all_pack.end(), result.begin(), [](lite::StringPack &pack) {
    std::string str(pack.data, pack.len);
    return str;
  });
  return result;
}
#endif
}  // namespace mindspore