/**
 * Copyright 2020-2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_TOOLS_COMMON_TENSOR_UTIL_H_
#define MINDSPORE_LITE_TOOLS_COMMON_TENSOR_UTIL_H_

#include <cmath>
#include <unordered_map>
#include <memory>
#include <algorithm>
#include <utility>
#include <string>
#include <vector>
#include <random>
#include <cfloat>
#include "schema/inner/model_generated.h"
#include "src/common/log_util.h"
#include "src/common/log_adapter.h"
#include "ir/dtype/type_id.h"
#include "ir/tensor.h"
#include "src/common/utils.h"
#include "tools/common/statistic_utils.h"
#include "src/tensor.h"
#include "include/api/model.h"

namespace mindspore {
namespace lite {
using schema::CNodeT;
using schema::Format;
using schema::FusedBatchNormT;
using schema::MetaGraphT;
using schema::QuantParamT;
using schema::TensorT;
std::unique_ptr<QuantParamT> GetTensorQuantParam(const std::unique_ptr<TensorT> &tensor);

tensor::TensorPtr CreateTensorInfo(const void *data, size_t data_size, const std::vector<int64_t> &shape,
                                   TypeId data_type);

AbstractBasePtr CreateTensorAbstract(const std::vector<int64_t> &shape, TypeId data_type);

int SetParameterAbstractAndParam(const ParameterPtr &parameter, const void *data, size_t data_size,
                                 const std::vector<int64_t> &shape, TypeId data_type);

int SetTensorData(const tensor::TensorPtr &tensor_info, const void *data, size_t data_size);

std::unique_ptr<schema::TensorT> CreateTensorTFromTensorInfo(const tensor::TensorPtr &tensor_info,
                                                             const std::string &tensor_name = "");

int UpdateTensorTFromTensorInfo(const tensor::TensorPtr &src_tensor, std::unique_ptr<schema::TensorT> *dst_tensor);

int InitParameterFromTensorInfo(const ParameterPtr &param_node, const tensor::TensorPtr &tensor_info);

size_t GetElementSize(const TensorT &tensor);

size_t GetElementSize(const TypeId &dataType);

size_t GetShapeSize(const TensorT &tensor);

size_t GetShapeSize(const std::vector<int32_t> &shape);

std::unique_ptr<TensorT> CopyTensorDefT(const std::unique_ptr<TensorT> &);

size_t GetRefCount(schema::MetaGraphT *graphT, uint32_t tensorIdx);

std::unique_ptr<schema::QuantParamT> CopyQuantParamT(const std::unique_ptr<schema::QuantParamT> &srcQuantParam);

int GenerateRandomData(mindspore::MSTensor *tensors);

int GenerateRandomData(size_t size, void *data, int data_type);

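// Fills the buffer at `data` (of `size` bytes, interpreted as elements of type T) with values drawn from the given
// random distribution. The caller is responsible for passing a distribution whose result type is convertible to T.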
template <typename T, typename Distribution>
void FillInputData(size_t size, void *data, Distribution distribution) {
  std::mt19937 random_engine;
  MS_ASSERT(data != nullptr);
  size_t elements_num = size / sizeof(T);
  (void)std::generate_n(static_cast<T *>(data), elements_num,
                        [&]() { return static_cast<T>(distribution(random_engine)); });
}
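// A minimal usage sketch (illustrative only): fill a float buffer with values from U(0, 1).
//
//   std::vector<float> buffer(16);
//   std::uniform_real_distribution<float> dist(0.0f, 1.0f);
//   FillInputData<float>(buffer.size() * sizeof(float), buffer.data(), dist);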

struct CheckTensor {
  CheckTensor(const std::string &tensor_name, const std::vector<size_t> &shape, const std::vector<float> &data,
              const std::vector<std::string> &strings_data = {""}) {
    this->tensor_name = tensor_name;
    this->shape = shape;
    this->data = data;
    this->strings_data = strings_data;
  }
  std::string tensor_name;
  std::vector<size_t> shape;
  std::vector<float> data;
  std::vector<std::string> strings_data;
};
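// Illustrative only: a CheckTensor bundles a named expected output (shape plus float or string data) for comparison
// against an inference result, e.g. CheckTensor expected("output_0", {1, 2}, {0.25f, 0.75f});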

// Note: the tensor data needs to be converted first.
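// Compares each output tensor of `origin_model` with the same-named output of `quant_model` and returns the mean
// cosine similarity over all outputs. GetCosSimilarity is assumed to compute the standard metric
// dot(a, b) / (||a|| * ||b||) over the element buffers.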
template <typename T>
float CompareDataByCosineDistance(const std::shared_ptr<mindspore::Model> &origin_model,
                                  const std::shared_ptr<mindspore::Model> &quant_model) {
  CHECK_NULL_RETURN(origin_model);
  CHECK_NULL_RETURN(quant_model);
  if (origin_model->GetOutputs().empty() || quant_model->GetOutputs().empty()) {
    MS_LOG(ERROR) << "calib or out tensor is empty.";
    return RET_ERROR;
  }
  float total_cos = 0;
  auto calib_tensors = origin_model->GetOutputs();
  for (const auto &calib_tensor : calib_tensors) {
    auto calib_data = reinterpret_cast<const T *>(calib_tensor.Data().get());
    auto out_tensor = quant_model->GetOutputByTensorName(calib_tensor.Name());
    if (out_tensor == nullptr) {
      MS_LOG(ERROR) << "Can't find " << calib_tensor.Name() << " in out_tensors";
      return RET_ERROR;
    }
    auto out_data = reinterpret_cast<const T *>(out_tensor.Data().get());
    auto cos = mindspore::lite::GetCosSimilarity<T>(calib_data, out_data, static_cast<size_t>(out_tensor.ElementNum()));
    total_cos += cos;
    MS_LOG(INFO) << "tensor_name:" << calib_tensor.Name() << " cos_sim: " << cos;
  }
  return total_cos / calib_tensors.size();
}

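// Compares each output tensor of `origin_model` with the same-named output of `quant_model` and returns the mean
// relative error over all outputs. An element is counted as an error when
// |out - calib| > absoluteTolerance + relativeTolerance * |calib|; the accumulated error is relative to the
// calibration value, except for near-zero calibration values, where the absolute error is used instead.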
template <typename T>
float CompareData(const std::shared_ptr<mindspore::Model> &origin_model,
                  const std::shared_ptr<mindspore::Model> &quant_model) {
  CHECK_NULL_RETURN(origin_model);
  CHECK_NULL_RETURN(quant_model);
  if (origin_model->GetOutputs().empty() || quant_model->GetOutputs().empty()) {
    MS_LOG(ERROR) << "calib or out tensor is empty.";
    return RET_ERROR;
  }
  float total_mean_error = 0;
  auto calib_tensors = origin_model->GetOutputs();
  for (const auto &calib_tensor : calib_tensors) {
    size_t error_count = 0;
    float mean_error = 0;
    auto calib_data = reinterpret_cast<const T *>(calib_tensor.Data().get());
    auto out_tensor = quant_model->GetOutputByTensorName(calib_tensor.Name());
    if (out_tensor == nullptr) {
      MS_LOG(ERROR) << "Can't find " << calib_tensor.Name() << " in out_tensors";
      return RET_ERROR;
    }
    auto out_data = reinterpret_cast<const T *>(out_tensor.Data().get());
    for (int64_t j = 0; j < calib_tensor.ElementNum(); j++) {
      if (std::is_same<T, float>::value && (std::isnan(out_data[j]) || std::isinf(out_data[j]))) {
        MS_LOG(ERROR) << "Output tensor has nan or inf data, compare fail";
        return RET_ERROR;
      }
      constexpr float relativeTolerance = 1e-5;
      constexpr float absoluteTolerance = 1e-8;
      auto tolerance = absoluteTolerance + relativeTolerance * std::fabs(calib_data[j]);
      auto absolute_error = std::fabs(out_data[j] - calib_data[j]);
      if (absolute_error > tolerance) {
        if (std::fabs(calib_data[j] - 0.0f) < FLT_EPSILON) {
          // The calibration value is effectively zero: only count the absolute error when it is noticeable.
          if (absolute_error > 1e-5) {
            mean_error += absolute_error;
            error_count++;
          } else {
            continue;
          }
        } else {
          // Otherwise accumulate the error relative to the calibration value.
          mean_error += absolute_error / (std::fabs(calib_data[j]) + FLT_MIN);
          error_count++;
        }
      }
    }
    if (mean_error > 0.0f && error_count > 0) {
      mean_error /= error_count;
    }
    total_mean_error += std::abs(mean_error);
    MS_LOG(INFO) << "tensor_name:" << calib_tensor.Name() << " mean_error: " << mean_error
                 << " error_count:" << error_count;
  }
  return total_mean_error / calib_tensors.size();
}
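// A minimal usage sketch (illustrative only), assuming both models have already been built and run on the same
// inputs, and that the compared outputs are float tensors:
//
//   float mean_error = mindspore::lite::CompareData<float>(origin_model, quant_model);
//   float mean_cos = mindspore::lite::CompareDataByCosineDistance<float>(origin_model, quant_model);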
}  // namespace lite
}  // namespace mindspore

#endif  // MINDSPORE_LITE_TOOLS_COMMON_TENSOR_UTIL_H_