/**
 * Copyright 2020-2023 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_TENSOR_H_
#define MINDSPORE_LITE_SRC_TENSOR_H_

#include <math.h>
#include <memory>
#include <vector>
#include <string>
#include <numeric>
#include <functional>
#include <atomic>
#include <sstream>  // std::ostringstream, used by DataToString below
#include "include/api/format.h"
#include "include/lite_types.h"
#include "nnacl/tensor_c.h"
#include "nnacl/tensor_c_utils.h"
#include "src/litert/inner_allocator.h"
#include "src/common/log_adapter.h"
#include "src/common/utils.h"
#include "src/litert/tensor_category.h"

namespace mindspore {
namespace lite {
// Sentinel values returned by Allocator::RefCount(nullptr) that identify special allocator kinds.
#define STATIC_ALLOCATION -271964
#define RUNTIME_REFCOUNT 0x9999
#define OPENCL_ALLOCATOR_REFCOUNT -10000
#define NNRT_ALLOCATION -10001
#define IS_STATIC_ALLOCATOR(allocator) ((allocator != nullptr) && (allocator->RefCount(nullptr) == STATIC_ALLOCATION))
#define IS_RUNTIME_ALLOCATOR(allocator) ((allocator != nullptr) && (allocator->RefCount(nullptr) == RUNTIME_REFCOUNT))
#define IS_OPENCL_ALLOCATOR(allocator) \
  ((allocator != nullptr) && (allocator->RefCount(nullptr) == OPENCL_ALLOCATOR_REFCOUNT))
#define IS_NNRT_ALLOCATOR(allocator) ((allocator != nullptr) && (allocator->RefCount(nullptr) == NNRT_ALLOCATION))

struct LiteQuantParam {
  double scale;
  int32_t zeroPoint;
  float var_corr{1};
  float mean_corr{0};
  bool inited{false};
  std::vector<float> clusters{};
  int bitNum{8};
  int roundType{1};
  int multiplier{1};
  int dstDtype{32};
  // dynamic range
  double min{-255.0};
  double max{255.0};
};
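
// Illustrative sketch (not part of this header): LiteQuantParam describes an affine
// mapping between float and quantized values. Assuming the conventional formula
// q = round(x / scale) + zeroPoint (this header does not define it), the conversion
// helpers would look roughly like:
//
//   int32_t Quantize(float x, const LiteQuantParam &p) {
//     return static_cast<int32_t>(std::round(x / p.scale)) + p.zeroPoint;
//   }
//   float Dequantize(int32_t q, const LiteQuantParam &p) {
//     return static_cast<float>((q - p.zeroPoint) * p.scale);
//   }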

enum CompressType {
  kNoCompression = 0,
  kIndexing = 1,
  kSparse = 2,
  kFSE = 3,
  kBitPacking = 4,
  kFSEInt = 5,
  kFSEInfer = 6
};

class MS_API Tensor {
 public:
  Tensor() { tensor_c_ = {false, kTypeUnknown, NHWC, VarTensor, nullptr, 0}; }

  Tensor(TypeId data_type, std::vector<int> shape, const mindspore::Format &format = mindspore::NHWC,
         Category category = VAR);

  Tensor(const Tensor &tensor) = delete;

  Tensor(Tensor &&other) = delete;

  Tensor &operator=(const Tensor &tensor) = delete;

  Tensor &operator=(Tensor &&src) = delete;

  virtual ~Tensor();

  static Tensor *CreateTensor(const std::string &name, TypeId type, const std::vector<int> &shape, const void *data,
                              size_t data_len);

  static Tensor *CreateTensorByDeepCopy(const std::string &name, TypeId type, const std::vector<int> &shape,
                                        const void *data, size_t data_len);

  static int CopyTensorData(const Tensor &src_tensor, Tensor *dst_tensor);

  static Tensor *CopyTensor(const Tensor &src_tensor, bool copy_data = false, AllocatorPtr allocator = nullptr);

  virtual bool operator==(const Tensor &tensor);

  void set_tensor_name(const std::string &name) { tensor_name_ = name; }

  std::string tensor_name() const { return tensor_name_; }

  TypeId data_type() const { return static_cast<TypeId>(tensor_c_.data_type_); }

  void set_data_type(TypeId data_type) { tensor_c_.data_type_ = data_type; }

  std::vector<int> shape() const {
    return std::vector<int>(tensor_c_.shape_, tensor_c_.shape_ + tensor_c_.shape_size_);
  }

  void set_shape(const std::vector<int> &shape) {
    if (shape.size() > MAX_SHAPE_SIZE) {
      FreeData();
      tensor_c_.shape_size_ = 0;
      MS_LOG(WARNING) << "The shape size exceeds the limit " << MAX_SHAPE_SIZE << ", now is " << shape.size();
      return;
    }
    tensor_c_.shape_size_ = shape.size();
    for (size_t i = 0; i < shape.size(); ++i) {
      tensor_c_.shape_[i] = shape[i];
    }
  }

  std::vector<int64_t> shape64() const {
    return std::vector<int64_t>(tensor_c_.shape_, tensor_c_.shape_ + tensor_c_.shape_size_);
  }

  void set_shape64(const std::vector<int64_t> &shape) {
    if (shape.size() > MAX_SHAPE_SIZE) {
      FreeData();
      tensor_c_.shape_size_ = 0;
      MS_LOG(WARNING) << "The shape size exceeds the limit " << MAX_SHAPE_SIZE << ", now is " << shape.size();
      return;
    }
    tensor_c_.shape_size_ = shape.size();
    for (size_t i = 0; i < shape.size(); ++i) {
      tensor_c_.shape_[i] = shape[i];
    }
  }

  int DimensionSize(size_t index) const;

  int64_t ElementsNum() const;

  int32_t Batch() const;

  int32_t Channel() const;

  int32_t Height() const;

  int32_t Width() const;

  int64_t ElementsC4Num() const;

  int64_t ElementsC8Num() const;

  virtual size_t Size() const;

  virtual void set_allocator(AllocatorPtr allocator) { allocator_ = allocator; }

  AllocatorPtr allocator() const { return allocator_; }

  virtual int MallocData(const AllocatorPtr allocator = nullptr);

  virtual void FreeData();

  virtual void *MutableData();

  void *ReallocData();

  virtual void *data() { return tensor_c_.data_; }

  virtual void *data() const { return tensor_c_.data_; }
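
  // Illustrative usage sketch (assumption: MutableData() allocates the backing
  // buffer on first use, while data() only returns the raw pointer and never allocates):
  //
  //   auto *dst = reinterpret_cast<float *>(tensor->MutableData());  // may allocate
  //   if (dst != nullptr) { dst[0] = 1.0f; }
  //   const void *src = tensor->data();  // raw pointer only, may be nullptr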

  // Note: if the old data is valid, set_data only releases this tensor's ownership of it and does not free it.
  // Call FreeData() before set_data() if the old data should be freed by this tensor.
  void set_data(void *data, bool own_data = true) {
    if (allocator_ != nullptr && this->tensor_c_.data_ != data) {
      (void)allocator_->IncRefCount(data, 1);
      (void)allocator_->DecRefCount(this->tensor_c_.data_, 1);
    }
    this->tensor_c_.data_ = data;
    this->own_data_ = own_data;
  }
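
  // Illustrative sketch of the ownership rule above (`buffer` is a made-up name):
  //
  //   void *buffer = malloc(tensor->Size());
  //   tensor->FreeData();               // release the previous data first
  //   tensor->set_data(buffer);         // tensor now owns `buffer`
  //   tensor->set_data(buffer, false);  // tensor uses `buffer` but will not free it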

  void set_device_data(void *data) { device_data_ = data; }

  void *device_data() const { return device_data_; }

  bool is_device() const { return device_data_ != nullptr; }

  Category category() const { return static_cast<Category>(tensor_c_.category_); }

  void set_category(Category category) { tensor_c_.category_ = category; }

  void set_format(mindspore::Format format) { this->tensor_c_.format_ = format; }

  mindspore::Format format() const { return static_cast<mindspore::Format>(this->tensor_c_.format_); }

  virtual int ref_count() const { return ref_count_; }

  virtual int init_ref_count() const { return static_cast<int>(this->init_ref_count_); }

  virtual void set_ref_count(int ref_count) { ref_count_ = ref_count; }

  virtual void set_init_ref_count(int ref_count) { this->init_ref_count_ = ref_count; }

  virtual void ResetRefCount() { set_ref_count(static_cast<int>(this->init_ref_count_)); }

  virtual void IncRefCount() { ++ref_count_; }

  virtual void DecRefCount();

  std::string ToString() const;

  void AddQuantParam(const LiteQuantParam &quant_param);

  void ClearQuantParam();

  std::vector<LiteQuantParam> quant_params() const;

  void set_quant_params(std::vector<LiteQuantParam>);

  std::vector<float> quant_clusters() const;

  void set_quant_clusters(const std::vector<float> &clusters);

  virtual bool IsConst() const { return ::IsConst(&tensor_c_); }

  bool IsScalar() const { return this->tensor_c_.category_ == CONST_SCALAR && this->tensor_c_.data_ != nullptr; }

  bool IsGraphInput() const { return this->tensor_c_.category_ == GRAPH_INPUT; }

  bool IsGraphOutput() const { return this->tensor_c_.category_ == GRAPH_OUTPUT; }

  void Prepare() {
    if (allocator_ != nullptr) {
      tensor_c_.data_ = allocator_->Prepare(tensor_c_.data_);
    }
  }

  bool IsReady() const {
    return this->IsConst() || (this->IsGraphInput() && this->tensor_c_.data_ != nullptr) || ref_count() >= 1;
  }

  bool own_data() const { return this->own_data_; }

  virtual void set_own_data(bool own_data) { this->own_data_ = own_data; }

  // Multiplies every element in place by `scale` and accumulates the factor into scale_.
  template <typename T>
  int Scale(float scale) {
    T cast_scale = static_cast<T>(scale);
    auto data = reinterpret_cast<T *>(tensor_c_.data_);
    if (data == nullptr) {
      return RET_ERROR;
    }
    int64_t length = ElementsNum();
    for (int64_t i = 0; i < length; i++) {
      data[i] *= cast_scale;
    }
    scale_ *= scale;
    return RET_OK;
  }

  float get_scale() const { return this->scale_; }

  void set_scale(float scale) { this->scale_ = scale; }

  CompressType get_compress_type() const { return this->compress_type_; }

  void set_compress_type(CompressType compression_type) { this->compress_type_ = compression_type; }

  void set_compressed_size(size_t compressed_size) { this->compressed_size_ = compressed_size; }

  bool IsScale() const { return (std::fabs(this->scale_ - 1.0f) > 1.0e-05); }

  void set_shape_changed(bool shape_changed) { tensor_c_.shape_changed_ = shape_changed; }

  bool get_shape_changed() const { return tensor_c_.shape_changed_; }

  int get_device_id() const { return device_id_; }

  void set_device_id(int device_id) { device_id_ = device_id; }

  std::string get_device() { return device_; }

  void set_device(const std::string &device) { device_ = device; }

  TensorC *ConvertToTensorC() { return &tensor_c_; }

 private:
  template <typename T>
  std::string DataToString(void *data, size_t data_number, size_t print_len = 40) const {
    if (data == nullptr) {
      return "Data of tensor is nullptr";
    }
    std::ostringstream oss;
    auto casted_data = static_cast<T *>(data);
    for (size_t i = 0; i < print_len && i < data_number; i++) {
      oss << " " << casted_data[i];
    }
    return oss.str();
  }

 protected:
  TensorC tensor_c_;
  std::string tensor_name_;
  std::atomic_int ref_count_ = {0};
  int init_ref_count_ = 0;
  std::vector<LiteQuantParam> quant_params_;
  std::vector<float> quant_clusters_;
  AllocatorPtr allocator_ = nullptr;
  bool own_data_{false};
  float scale_ = 1.0f;
  void *device_data_ = nullptr;
  CompressType compress_type_ = kNoCompression;
  size_t compressed_size_ = 0;
  std::string device_ = "";
  int device_id_ = -1;
};
}  // namespace lite
}  // namespace mindspore

using TensorPtr = std::shared_ptr<mindspore::lite::Tensor>;
#endif  // MINDSPORE_LITE_SRC_TENSOR_H_
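
// Illustrative end-to-end sketch (values are made up; assumes kNumberTypeFloat32 is
// the TypeId for float tensors and that the caller owns the pointer CreateTensor returns):
//
//   float buf[6] = {1, 2, 3, 4, 5, 6};
//   TensorPtr t(mindspore::lite::Tensor::CreateTensor("in", kNumberTypeFloat32,
//                                                     {1, 2, 3}, buf, sizeof(buf)));
//   if (t != nullptr && t->Size() == sizeof(buf)) { /* ready to bind to a kernel */ }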