/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/cxx_api/tensor/tensor_impl.h"
#include <cstddef>
#include <numeric>
#include <memory>
#include <algorithm>
#include <string>
#include <vector>
#include <functional>
#include "src/cxx_api/tensor_utils.h"
#include "src/tensor.h"
#include "include/lite_utils.h"

namespace mindspore {
using mindspore::lite::RET_OK;

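// Builds an MSTensor::Impl that wraps a newly created lite::Tensor. The int64_t shape from the
// public API is truncated to the int32_t shape used internally; if the truncation fails for a
// non-empty shape, or any allocation fails, nullptr is returned.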
std::shared_ptr<MSTensor::Impl> MSTensor::Impl::CreateTensorImpl(const std::string &name, enum DataType type,
                                                                 const std::vector<int64_t> &shape, const void *data,
                                                                 size_t data_len) {
  std::vector<int32_t> truncated_shape;
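  // With no data buffer (data_len == 0) the shape is converted without size verification;
  // with a buffer, TruncateShape is additionally asked to check the shape against data_len.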
  if (data_len == 0) {
    truncated_shape = TruncateShape(shape, static_cast<enum TypeId>(type), data_len, false);
  } else {
    truncated_shape = TruncateShape(shape, static_cast<enum TypeId>(type), data_len, true);
  }
  if (truncated_shape.empty() && !(shape.empty())) {
    MS_LOG(ERROR) << "Invalid shape for creating tensor.";
    return nullptr;
  }
  auto lite_tensor = lite::Tensor::CreateTensor(name, static_cast<enum TypeId>(type), truncated_shape, data, data_len);
  if (lite_tensor == nullptr) {
    MS_LOG(ERROR) << "Failed to allocate lite tensor.";
    return nullptr;
  }
  auto impl = std::shared_ptr<MSTensor::Impl>(new (std::nothrow) Impl(lite_tensor));
  if (impl == nullptr) {
    delete lite_tensor;  // avoid leaking the lite tensor when the impl cannot be allocated
    MS_LOG(ERROR) << "Failed to allocate tensor impl.";
    return nullptr;
  }
  impl->set_from_session(false);
  return impl;
}

#ifndef STRING_KERNEL_CLIP
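// Packs a vector of strings into a newly allocated lite::Tensor via lite::StringsToMSTensor and
// wraps it in an MSTensor::Impl. Returns nullptr if allocation or conversion fails.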
std::shared_ptr<MSTensor::Impl> MSTensor::Impl::StringsToTensorImpl(const std::string &name,
                                                                    const std::vector<std::string> &str) {
  auto lite_tensor = new (std::nothrow) lite::Tensor();
  if (lite_tensor == nullptr) {
    MS_LOG(ERROR) << "Failed to allocate lite tensor.";
    return nullptr;
  }
  lite_tensor->set_tensor_name(name);
  auto ret = lite::StringsToMSTensor(str, lite_tensor);
  if (ret != RET_OK) {
    MS_LOG(ERROR) << "Convert strings to tensor failed.";
    delete lite_tensor;
    return nullptr;
  }
  auto impl = std::shared_ptr<MSTensor::Impl>(new (std::nothrow) Impl(lite_tensor));
  if (impl == nullptr) {
    delete lite_tensor;
    MS_LOG(ERROR) << "Failed to allocate tensor impl.";
    return nullptr;
  }
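  // The packed string buffer was allocated by StringsToMSTensor into lite_tensor, so the impl is
  // marked as owning the data so that it is released together with the tensor.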
  impl->set_own_data(true);
  impl->set_from_session(false);
  return impl;
}

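// Unpacks the strings stored in the given tensor impl back into a std::vector<std::string>.
// Returns an empty vector if the impl does not hold a valid lite tensor.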
std::vector<std::string> MSTensor::Impl::TensorImplToStrings(const std::shared_ptr<Impl> &impl) {
  std::vector<std::string> empty;
  if (impl == nullptr) {  // guard against a null impl before dereferencing it
    MS_LOG(ERROR) << "Invalid tensor impl.";
    return empty;
  }
  auto lite_tensor = impl->lite_tensor();
  if (lite_tensor == nullptr) {
    MS_LOG(ERROR) << "Invalid tensor impl.";
    return empty;
  }
  return lite::MSTensorToStrings(lite_tensor);
}
#endif
}  // namespace mindspore