• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * Copyright 2020 Huawei Technologies Co., Ltd
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef MINDSPORE_LITE_SRC_TENSOR_H_
18 #define MINDSPORE_LITE_SRC_TENSOR_H_
19 
20 #include <math.h>
21 #include <memory>
22 #include <vector>
23 #include <string>
24 #include <numeric>
25 #include <functional>
26 #include <atomic>
27 #include "include/ms_tensor.h"
28 #include "include/api/format.h"
29 #include "src/runtime/inner_allocator.h"
30 #include "src/common/log_adapter.h"
31 #include "schema/model_generated.h"
32 #include "src/common/utils.h"
33 
34 namespace mindspore {
35 namespace lite {
36 
// Sentinel values returned by Allocator::RefCount(nullptr) that tag special allocator kinds.
// NOTE(review): the numeric values look like arbitrary magic tags — confirm against the allocator impl.
#define STATIC_ALLOCATION -271964
#define RUNTIME_REFCOUNT 0x9999
// True when `allocator` is non-null and reports the matching sentinel ref-count for a null pointer.
#define IS_STATIC_ALLOCATOR(allocator) ((allocator != nullptr) && (allocator->RefCount(nullptr) == STATIC_ALLOCATION))
#define IS_RUNTIME_ALLOCATOR(allocator) ((allocator != nullptr) && (allocator->RefCount(nullptr) == RUNTIME_REFCOUNT))
41 
// Per-tensor (or per-channel) quantization parameters.
// Field names/order are part of the public layout — do not reorder.
struct LiteQuantParam {
  double scale;              // quantization scale: real = (quantized - zeroPoint) * scale
  int32_t zeroPoint;         // quantization zero point
  float var_corr{1};         // variance correction factor (presumably for post-training calibration — TODO confirm)
  float mean_corr{0};        // mean correction factor (same caveat as var_corr)
  bool inited{false};        // true once this param has been populated with valid values
  std::vector<float> clusters{};  // cluster centers for weight-clustering quantization; empty when unused
  int bitNum{8};             // quantized bit width, default 8-bit
  int roundType{1};          // rounding mode selector (meaning defined by the quantizer — TODO confirm)
  int multiplier{1};         // fixed-point multiplier used with scale in integer-only kernels
  int dstDtype{32};          // destination data-type tag (defaults to 32 — TODO confirm mapping to TypeId)
};
54 
// Runtime tensor for MindSpore Lite. Extends the public MSTensor interface with
// allocator-aware memory management, reference counting for scheduling, and
// quantization metadata. Copy/move are deleted: instances are shared by pointer
// and ownership of the data buffer is tracked explicitly via own_data_.
class Tensor : public mindspore::tensor::MSTensor {
 public:
  // Role of the tensor inside a graph.
  enum Category {
    CONST_TENSOR,  // weight tensor
    CONST_SCALAR,  // weight scalar
    VAR,           // activation tensor
    GRAPH_INPUT,
    GRAPH_OUTPUT,
  };
  Tensor() = default;

  Tensor(TypeId data_type, std::vector<int> shape, const mindspore::Format &format = mindspore::NHWC,
         Category category = VAR);

  // Non-copyable and non-movable: tensors are shared by pointer.
  Tensor(const Tensor &tensor) = delete;

  Tensor(Tensor &&other) = delete;

  Tensor &operator=(const Tensor &tensor) = delete;

  Tensor &operator=(Tensor &&src) = delete;

  ~Tensor() override;

  // Copies the raw data buffer of src_tensor into dst_tensor; returns RET_OK on success.
  static int CopyTensorData(const Tensor &src_tensor, Tensor *dst_tensor);

  // Creates a new tensor with the same metadata as src_tensor; copies the data
  // buffer as well when copy_data is true. Caller owns the returned pointer.
  static Tensor *CopyTensor(const Tensor &src_tensor, bool copy_data = false, AllocatorPtr allocator = nullptr);

  virtual bool operator==(const Tensor &tensor);

  void set_tensor_name(const std::string &name) override { tensor_name_ = name; }

  std::string tensor_name() const override { return tensor_name_; }

  TypeId data_type() const override { return data_type_; }

  void set_data_type(TypeId data_type) override { data_type_ = data_type; }

  std::vector<int> shape() const override { return shape_; }

  void set_shape(const std::vector<int> &shape) override { shape_ = shape; }

  // Size of the dimension at `index`.
  int DimensionSize(size_t index) const;

  // Total number of elements (product of shape dims).
  int ElementsNum() const override;

  // NHWC-style accessors for the common 4D layout.
  int32_t Batch() const;

  int32_t Channel() const;

  int32_t Height() const;

  int32_t Width() const;

  // Element count with the channel dimension rounded up to a multiple of 4
  // (C4-packed layout used by some kernels).
  int32_t ElementsC4Num() const;

  // Buffer size in bytes.
  size_t Size() const override;

  void set_allocator(AllocatorPtr allocator) override { allocator_ = allocator; }

  AllocatorPtr allocator() const override { return allocator_; }

  // Allocates the data buffer from `allocator` (or the tensor's own allocator
  // when null); returns RET_OK on success.
  virtual int MallocData(const AllocatorPtr allocator = nullptr);

  virtual void FreeData();

  // Returns the data pointer, allocating the buffer first if needed.
  void *MutableData() override;

  void *ReallocData();

  void *data() override { return data_; };

  virtual void *data() const { return data_; }

  // note: in the case of that old_data is valid, set_data just releases the ownership of it but not frees it. Of
  //       course, you can call FreeData before calling set_data to ensure the data can be freed by current tensor.
  void set_data(void *data) override {
    if (this->data_ == data) {
      return;
    }
    // Transfer allocator ref-counts: the new buffer gains a reference, the old one loses its.
    if (allocator_ != nullptr) {
      allocator_->IncRefCount(data, 1);
      allocator_->DecRefCount(this->data_, 1);
    }
    this->data_ = data;
    this->own_data_ = true;
  }

  Category category() const { return this->category_; }

  void set_category(Category category) { this->category_ = category; }

  void set_format(mindspore::Format format) override { this->format_ = format; }

  mindspore::Format format() const override { return this->format_; }
  // Current scheduling ref-count (how many consumers still need this tensor).
  virtual int ref_count() const { return ref_count_; }

  virtual int init_ref_count() const { return this->init_ref_count_; }

  virtual void set_ref_count(int ref_count) {
    ref_count_ = ref_count;
  }

  void set_init_ref_count(int ref_count) { this->init_ref_count_ = ref_count; }

  // Restores the ref-count to its initial value (e.g. before a new graph run).
  virtual void ResetRefCount() { set_ref_count(static_cast<int>(this->init_ref_count_)); }

  virtual void IncRefCount();

  virtual void DecRefCount();

  std::string ToString() const;

  void AddQuantParam(const LiteQuantParam &quant_param);

  std::vector<LiteQuantParam> quant_params() const override;

  void set_quant_params(std::vector<LiteQuantParam>) override;

  std::vector<float> quant_clusters() const;

  void set_quant_clusters(const std::vector<float> &clusters);

  // A tensor is const only when it is a weight AND its data has been loaded.
  bool IsConst() const override {
    return (this->category_ == CONST_TENSOR || this->category_ == CONST_SCALAR) && this->data_ != nullptr;
  }

  bool IsScalar() const { return this->category_ == CONST_SCALAR && this->data_ != nullptr; }

  bool IsGraphInput() const { return this->category_ == GRAPH_INPUT; }

  bool IsGraphOutput() const { return this->category_ == GRAPH_OUTPUT; }

  // Lets the allocator pre-process/relocate the buffer (no-op without an allocator).
  void Prepare() {
    if (allocator_ != nullptr) {
      data_ = allocator_->Prepare(data_);
    }
  }

  // Ready for execution: loaded weight, fed graph input, or still referenced by a consumer.
  bool IsReady() const {
    return this->IsConst() || (this->IsGraphInput() && this->data_ != nullptr) || ref_count() >= 1;
  }

  bool own_data() const { return this->own_data_; }

  virtual void set_own_data(bool own_data) { this->own_data_ = own_data; }

  // In-place multiplies every element (interpreted as T) by `scale` and
  // accumulates the factor into scale_. Returns RET_ERROR when data is unset.
  template <typename T>
  int Scale(float scale) {
    T cast_scale = static_cast<T>(scale);
    auto data = reinterpret_cast<T *>(data_);
    if (data == nullptr) {
      return RET_ERROR;
    }
    int length = ElementsNum();
    for (int i = 0; i < length; i++) {
      data[i] *= cast_scale;
    }
    scale_ *= scale;
    return RET_OK;
  }

  float get_scale() const { return this->scale_; }

  void set_scale(float scale) { this->scale_ = scale; }

  // True when an overall scale other than 1.0 has been applied (epsilon compare).
  bool IsScale() const { return (std::fabs(this->scale_ - 1.0f) > 1.0e-05); }

 private:
  // Renders up to print_len elements (interpreted as T) as a space-separated string.
  template <typename T>
  std::string DataToString(void *data, size_t data_number, size_t print_len = 40) const {
    if (data == nullptr) {
      return "Data of tensor is nullptr";
    }
    std::ostringstream oss;
    auto casted_data = static_cast<T *>(data);
    for (size_t i = 0; i < print_len && i < data_number; i++) {
      oss << " " << casted_data[i];
    }
    return oss.str();
  }

 protected:
  std::string tensor_name_;
  void *data_ = nullptr;            // raw buffer; ownership tracked by own_data_/allocator_
  TypeId data_type_;
  std::vector<int> shape_;
  mindspore::Format format_;
  Category category_;
  std::atomic_int ref_count_ = {0};  // atomic: may be decremented from multiple threads
  size_t init_ref_count_ = 0;
  std::vector<LiteQuantParam> quant_params_;
  std::vector<float> quant_clusters_;
  AllocatorPtr allocator_ = nullptr;
  bool own_data_{false};            // true when this tensor is responsible for freeing data_
  float scale_ = 1.0f;              // accumulated factor applied via Scale<T>()
};
252 
DataTypeSize(const TypeId type)253 inline size_t DataTypeSize(const TypeId type) {
254   switch (type) {
255     case kNumberTypeFloat64:
256       return sizeof(double);
257     case kNumberTypeFloat:
258     case kNumberTypeFloat32:
259       return sizeof(float);
260     case kNumberTypeInt8:
261       return sizeof(int8_t);
262     case kNumberTypeUInt8:
263       return sizeof(uint8_t);
264     case kNumberTypeFloat16:
265     case kNumberTypeInt16:
266       return sizeof(int16_t);
267     case kNumberTypeInt32:
268       return sizeof(int32_t);
269     case kNumberTypeInt64:
270       return sizeof(int64_t);
271     case kNumberTypeUInt16:
272       return sizeof(uint16_t);
273     case kNumberTypeUInt32:
274       return sizeof(uint32_t);
275     case kNumberTypeUInt64:
276       return sizeof(uint64_t);
277     case kNumberTypeBool:
278       return sizeof(bool);
279     case kObjectTypeString:
280       return sizeof(char);
281     case kObjectTypeTensorType:
282       return 0;
283     default:
284       MS_LOG(ERROR) << "Not support the type: " << type;
285       return 0;
286   }
287 }
288 
TensorCategory(const int node_type,const size_t shape_num,const TypeId data_type,const size_t data_size)289 inline Tensor::Category TensorCategory(const int node_type, const size_t shape_num, const TypeId data_type,
290                                        const size_t data_size) {
291   return ((MSNodeType)node_type == MSNodeType::NodeType_ValueNode)
292            ? (shape_num == 0 && data_size == DataTypeSize(data_type) ? Tensor::Category::CONST_SCALAR
293                                                                      : Tensor::Category::CONST_TENSOR)
294            : Tensor::Category::VAR;
295 }
296 
TensorCategory(const schema::Tensor * tensor)297 inline Tensor::Category TensorCategory(const schema::Tensor *tensor) {
298   if (tensor == nullptr) {
299     MS_LOG(ERROR) << "tensor is nullptr";
300     return Tensor::VAR;
301   }
302   auto shape_num = tensor->dims() == nullptr ? 0 : tensor->dims()->size();
303   auto data_size = tensor->data() == nullptr ? 0 : tensor->data()->size();
304   return TensorCategory(tensor->nodeType(), shape_num, TypeId(tensor->dataType()), data_size);
305 }
306 
307 std::vector<tensor::MSTensor *> TensorVectorCast(const std::vector<Tensor *> &src);
308 }  // namespace lite
309 }  // namespace mindspore
310 
311 using TensorPtr = std::shared_ptr<mindspore::lite::Tensor>;
312 #endif  // MINDSPORE_LITE_SRC_TENSOR_H_
313