/**
 * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "minddata/dataset/include/dataset/execute.h"

#include <algorithm>
#include <fstream>

#include "minddata/dataset/core/de_tensor.h"
#include "minddata/dataset/core/tensor_row.h"
#include "minddata/dataset/core/tensor.h"
#include "minddata/dataset/core/type_id.h"
#include "minddata/dataset/kernels/ir/tensor_operation.h"
#include "minddata/dataset/kernels/tensor_op.h"
#ifndef ENABLE_ANDROID
#include "utils/log_adapter.h"
#else
#include "mindspore/lite/src/common/log_adapter.h"
#endif
#ifdef ENABLE_ACL
#include "minddata/dataset/core/ascend_resource.h"
#include "minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h"
#include "minddata/dataset/kernels/ir/vision/ascend_vision_ir.h"
#endif

namespace mindspore {
namespace dataset {

using json = nlohmann::json;
struct Execute::ExtraInfo {
  std::multimap<std::string, std::vector<uint32_t>> aipp_cfg_;
  bool init_with_shared_ptr_ = true;  // Initialize the execute object with shared_ptr by default
#ifdef ENABLE_ACL
  std::multimap<std::string, std::string> op2para_map_ = {{vision::kDvppCropJpegOperation, "size"},
                                                          {vision::kDvppDecodeResizeOperation, "size"},
                                                          {vision::kDvppDecodeResizeCropOperation, "crop_size"},
                                                          {vision::kDvppDecodeResizeCropOperation, "resize_size"},
                                                          {vision::kDvppNormalizeOperation, "mean"},
                                                          {vision::kDvppNormalizeOperation, "std"},
                                                          {vision::kDvppResizeJpegOperation, "size"}};
#endif
};
Status Execute::InitResource(MapTargetDevice device_type, uint32_t device_id) {
#ifdef ENABLE_ACL
  if (device_type_ == MapTargetDevice::kAscend310) {
    device_resource_ = std::make_shared<AscendResource>();
    Status rc = device_resource_->InitResource(device_id);
    if (!rc.IsOk()) {
      device_resource_ = nullptr;
      std::string err_msg = "Initialize Ascend310 resource failed.";
      MS_LOG(ERROR) << err_msg;
      RETURN_STATUS_UNEXPECTED(err_msg);
    }
  }
#endif
  return Status::OK();
}

Execute::Execute(std::shared_ptr<TensorOperation> op, MapTargetDevice device_type, uint32_t device_id) {
  ops_.emplace_back(std::move(op));
  device_type_ = device_type;
  info_ = std::make_shared<ExtraInfo>();
  (void)InitResource(device_type, device_id);
}

Execute::Execute(std::shared_ptr<TensorTransform> op, MapTargetDevice device_type, uint32_t device_id) {
  // Initialize the op and other context
  transforms_.emplace_back(op);

  info_ = std::make_shared<ExtraInfo>();
  device_type_ = device_type;
  (void)InitResource(device_type, device_id);
}

Execute::Execute(std::reference_wrapper<TensorTransform> op, MapTargetDevice device_type, uint32_t device_id) {
  // Initialize the transforms_ and other context
  std::shared_ptr<TensorOperation> operation = op.get().Parse();
  ops_.emplace_back(std::move(operation));

  info_ = std::make_shared<ExtraInfo>();
  info_->init_with_shared_ptr_ = false;
  device_type_ = device_type;
  (void)InitResource(device_type, device_id);
}

// Execute function for the example case: auto decode(new vision::Decode());
Execute::Execute(TensorTransform *op, MapTargetDevice device_type, uint32_t device_id) {
  // Initialize the transforms_ and other context
  std::shared_ptr<TensorTransform> smart_ptr_op(op);
  transforms_.emplace_back(smart_ptr_op);

  info_ = std::make_shared<ExtraInfo>();
  device_type_ = device_type;
  (void)InitResource(device_type, device_id);
}

Execute::Execute(std::vector<std::shared_ptr<TensorOperation>> ops, MapTargetDevice device_type, uint32_t device_id)
    : ops_(std::move(ops)), device_type_(device_type) {
  info_ = std::make_shared<ExtraInfo>();
  (void)InitResource(device_type, device_id);
}

Execute::Execute(std::vector<std::shared_ptr<TensorTransform>> ops, MapTargetDevice device_type, uint32_t device_id) {
  // Initialize the transforms_ and other context
  transforms_ = ops;

  info_ = std::make_shared<ExtraInfo>();
  device_type_ = device_type;
  (void)InitResource(device_type, device_id);
}

Execute::Execute(const std::vector<std::reference_wrapper<TensorTransform>> ops, MapTargetDevice device_type,
                 uint32_t device_id) {
  // Initialize the transforms_ and other context
  if (device_type == MapTargetDevice::kCpu) {
    (void)std::transform(
      ops.begin(), ops.end(), std::back_inserter(ops_),
      [](TensorTransform &operation) -> std::shared_ptr<TensorOperation> { return operation.Parse(); });
  } else {
    for (auto &op : ops) {
      ops_.emplace_back(op.get().Parse(device_type));
    }
  }

  info_ = std::make_shared<ExtraInfo>();
  info_->init_with_shared_ptr_ = false;
  device_type_ = device_type;
  (void)InitResource(device_type, device_id);
}

// Execute function for the example vector case: auto decode(new vision::Decode());
Execute::Execute(const std::vector<TensorTransform *> &ops, MapTargetDevice device_type, uint32_t device_id) {
  // Initialize the transforms_ and other context
  for (auto &op : ops) {
    std::shared_ptr<TensorTransform> smart_ptr_op(op);
    transforms_.emplace_back(smart_ptr_op);
  }

  info_ = std::make_shared<ExtraInfo>();
  device_type_ = device_type;
  (void)InitResource(device_type, device_id);
}
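
// Illustrative construction sketch for the overloads above (hypothetical transform
// objects; assumes the corresponding vision transform headers are included):
//   auto decode = std::make_shared<vision::Decode>();
//   auto resize = std::make_shared<vision::Resize>(std::vector<int32_t>{224, 224});
//   Execute transform({decode, resize});  // device defaults to MapTargetDevice::kCpu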

Execute::~Execute() {
#ifdef ENABLE_ACL
  if (device_type_ == MapTargetDevice::kAscend310) {
    if (device_resource_) {
      auto rc = device_resource_->FinalizeResource();
      if (rc.IsError()) {
        MS_LOG(ERROR) << "Device resource release failed, error msg is " << rc;
      }
    } else {
      MS_LOG(ERROR) << "Device resource is nullptr, which is illegal under case Ascend310.";
    }
  }
#endif
}

Status Execute::operator()(const mindspore::MSTensor &input, mindspore::MSTensor *output) {
  // Validate input tensor
  RETURN_UNEXPECTED_IF_NULL(output);
  CHECK_FAIL_RETURN_UNEXPECTED(input.DataSize() > 0, "Input Tensor has no data.");
  CHECK_FAIL_RETURN_UNEXPECTED(ValidateDevice(), "Device Type should be 'Ascend310' or 'CPU'.");

  // Parse TensorTransform transforms_ into TensorOperation ops_
  if (info_->init_with_shared_ptr_) {
    RETURN_IF_NOT_OK(ParseTransforms());
    info_->init_with_shared_ptr_ = false;
  }
  CHECK_FAIL_RETURN_UNEXPECTED(!ops_.empty(), "Input TensorOperation should be provided.");

  // Validate and build runtime ops
  std::vector<std::shared_ptr<TensorOp>> transforms;  // record the transformations

  std::map<MapTargetDevice, std::string> env_list = {
    {MapTargetDevice::kCpu, "kCpu"}, {MapTargetDevice::kGpu, "kGpu"}, {MapTargetDevice::kAscend310, "kAscend310"}};

  for (int32_t i = 0; i < ops_.size(); i++) {
    if (ops_[i] == nullptr) {
      std::string err_msg = "Input TensorOperation[" + std::to_string(i) +
                            "] is unsupported on your input device:" + env_list.at(device_type_);
      MS_LOG(ERROR) << err_msg;
      RETURN_STATUS_UNEXPECTED(err_msg);
    }
    RETURN_IF_NOT_OK(ops_[i]->ValidateParams());
    transforms.emplace_back(ops_[i]->Build());
  }

  if (device_type_ == MapTargetDevice::kCpu) {
    // Convert mindspore::Tensor to dataset::Tensor
    std::shared_ptr<dataset::Tensor> de_tensor;
    Status rc = dataset::Tensor::CreateFromMemory(dataset::TensorShape(input.Shape()),
                                                  MSTypeToDEType(static_cast<TypeId>(input.DataType())),
                                                  (const uchar *)(input.Data().get()), input.DataSize(), &de_tensor);
    if (rc.IsError()) {
      MS_LOG(ERROR) << rc;
      return rc;
    }

    // Apply transforms on tensor
    for (auto &t : transforms) {
      TensorRow de_tensor_row;
      TensorRow de_output_row;
      de_tensor_row.push_back(de_tensor);
      de_output_row.resize(1);
      Status rc_ = t->Compute(de_tensor_row, &de_output_row);
      if (rc_.IsError()) {
        MS_LOG(ERROR) << rc_;
        return rc_;
      }

      // For next transform
      de_tensor = std::move(de_output_row[0]);
    }

    // Convert dataset::Tensor to mindspore::Tensor
    if (!de_tensor->HasData()) {
      std::stringstream ss;
      ss << "Transformation returned an empty tensor with shape " << de_tensor->shape();
      RETURN_STATUS_UNEXPECTED(ss.str());
    }
    *output = mindspore::MSTensor(std::make_shared<DETensor>(de_tensor));
  } else if (device_type_ ==
             MapTargetDevice::kAscend310) {  // Ascend310 case, where we must set the Ascend resource on each operator
#ifdef ENABLE_ACL
    CHECK_FAIL_RETURN_UNEXPECTED(device_resource_,
                                 "Device resource is nullptr, which is illegal under case Ascend310.");
    // Sink data from host into device
    std::shared_ptr<mindspore::dataset::DeviceTensor> device_input;
    RETURN_IF_NOT_OK(device_resource_->Sink(input, &device_input));

    for (auto &t : transforms) {
      // Initialize AscendResource for each operator
      std::shared_ptr<DeviceTensor> device_output;
      RETURN_IF_NOT_OK(t->SetAscendResource(device_resource_));

      RETURN_IF_NOT_OK(t->Compute(device_input, &device_output));

      // For next transform
      device_input = std::move(device_output);
    }
    CHECK_FAIL_RETURN_UNEXPECTED(device_input->HasDeviceData(), "Apply transform failed, output tensor has no data.");

    *output = mindspore::MSTensor(std::make_shared<DETensor>(device_input, true));
#endif
  } else {
    std::string err_msg = "Your input device is not supported. (Option: CPU or Ascend310)";
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_UNEXPECTED(err_msg);
  }
  return Status::OK();
}
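
// Illustrative invocation of the eager operator() above (hypothetical names; the
// source of `image` is an assumption, not part of this file):
//   mindspore::MSTensor image = /* e.g. raw JPEG bytes loaded from disk */;
//   mindspore::MSTensor processed;
//   Status rc = transform(image, &processed);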

Status Execute::operator()(const std::vector<MSTensor> &input_tensor_list, std::vector<MSTensor> *output_tensor_list) {
  // Validate input tensor
  RETURN_UNEXPECTED_IF_NULL(output_tensor_list);
  CHECK_FAIL_RETURN_UNEXPECTED(!input_tensor_list.empty(), "Input Tensor is not valid.");
  output_tensor_list->clear();
  for (auto &tensor : input_tensor_list) {
    CHECK_FAIL_RETURN_UNEXPECTED(tensor.DataSize() > 0, "Input Tensor has no data.");
  }
  CHECK_FAIL_RETURN_UNEXPECTED(ValidateDevice(), "Device Type should be 'Ascend310' or 'CPU'.");

  // Parse TensorTransform transforms_ into TensorOperation ops_
  if (info_->init_with_shared_ptr_) {
    RETURN_IF_NOT_OK(ParseTransforms());
    info_->init_with_shared_ptr_ = false;
  }
  CHECK_FAIL_RETURN_UNEXPECTED(!ops_.empty(), "Input TensorOperation should be provided.");

  std::map<MapTargetDevice, std::string> env_list = {
    {MapTargetDevice::kCpu, "kCpu"}, {MapTargetDevice::kGpu, "kGpu"}, {MapTargetDevice::kAscend310, "kAscend310"}};

  // Validate and build runtime ops
  std::vector<std::shared_ptr<TensorOp>> transforms;
  for (int32_t i = 0; i < ops_.size(); i++) {
    if (ops_[i] == nullptr) {
      std::string err_msg = "Input TensorOperation[" + std::to_string(i) +
                            "] is unsupported on your input device:" + env_list.at(device_type_);
      MS_LOG(ERROR) << err_msg;
      RETURN_STATUS_UNEXPECTED(err_msg);
    }
    RETURN_IF_NOT_OK(ops_[i]->ValidateParams());
    transforms.emplace_back(ops_[i]->Build());
  }
  if (device_type_ == MapTargetDevice::kCpu) {  // Case CPU
    TensorRow de_tensor_list;
    for (auto &tensor : input_tensor_list) {
      std::shared_ptr<dataset::Tensor> de_tensor;
      Status rc = dataset::Tensor::CreateFromMemory(
        dataset::TensorShape(tensor.Shape()), MSTypeToDEType(static_cast<TypeId>(tensor.DataType())),
        (const uchar *)(tensor.Data().get()), tensor.DataSize(), &de_tensor);
      if (rc.IsError()) {
        MS_LOG(ERROR) << rc;
        RETURN_IF_NOT_OK(rc);
      }
      de_tensor_list.emplace_back(std::move(de_tensor));
    }
    // Apply transforms on tensor
    for (auto &t : transforms) {
      TensorRow de_output_list;
      RETURN_IF_NOT_OK(t->Compute(de_tensor_list, &de_output_list));
      // For next transform
      de_tensor_list = std::move(de_output_list);
    }
    int32_t idx = 0;
    for (auto &tensor : de_tensor_list) {
      if (!tensor->HasData()) {
        std::stringstream ss;
        ss << "Transformation returned an empty tensor at location " << idx << ". ";
        ss << "The shape of the tensor is " << tensor->shape();
        RETURN_STATUS_UNEXPECTED(ss.str());
      }
      auto ms_tensor = mindspore::MSTensor(std::make_shared<DETensor>(tensor));
      output_tensor_list->emplace_back(ms_tensor);
      ++idx;
    }
    CHECK_FAIL_RETURN_UNEXPECTED(!output_tensor_list->empty(), "Output Tensor is not valid.");
  } else if (device_type_ ==
             MapTargetDevice::kAscend310) {  // Ascend310 case, where we must set the Ascend resource on each operator
#ifdef ENABLE_ACL
    CHECK_FAIL_RETURN_UNEXPECTED(device_resource_,
                                 "Device resource is nullptr, which is illegal under case Ascend310.");
    for (auto &input_tensor : input_tensor_list) {
      // Sink each piece of data from host into device
      std::shared_ptr<dataset::DeviceTensor> device_input;
      RETURN_IF_NOT_OK(device_resource_->Sink(input_tensor, &device_input));

      for (auto &t : transforms) {
        std::shared_ptr<DeviceTensor> device_output;
        RETURN_IF_NOT_OK(t->SetAscendResource(device_resource_));

        RETURN_IF_NOT_OK(t->Compute(device_input, &device_output));

        // For next transform
        device_input = std::move(device_output);
      }
      CHECK_FAIL_RETURN_UNEXPECTED(device_input->HasDeviceData(), "Apply transform failed, output tensor has no data.");
      // Due to the limitation of Ascend310 memory, we have to pop every piece of data back onto host memory,
      // so this batch method is slower than solo mode
      std::shared_ptr<mindspore::dataset::Tensor> host_output;
      RETURN_IF_NOT_OK(device_resource_->Pop(device_input, &host_output));

      auto ms_tensor = mindspore::MSTensor(std::make_shared<DETensor>(host_output));
      output_tensor_list->emplace_back(ms_tensor);
      // Release the data on the device because we have copied one piece onto host
      RETURN_IF_NOT_OK(device_resource_->DeviceDataRelease());
    }
    CHECK_FAIL_RETURN_UNEXPECTED(!output_tensor_list->empty(), "Output Tensor vector is empty.");
#endif
  } else {
    std::string err_msg = "Your input device is not supported. (Option: CPU or Ascend310)";
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_UNEXPECTED(err_msg);
  }
  return Status::OK();
}
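
// The batched overload above follows the same pattern, e.g. (illustrative):
//   std::vector<mindspore::MSTensor> images = {img1, img2};  // hypothetical inputs
//   std::vector<mindspore::MSTensor> results;
//   Status rc = transform(images, &results);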

std::vector<uint32_t> AippSizeFilter(const std::vector<uint32_t> &resize_para, const std::vector<uint32_t> &crop_para) {
  std::vector<uint32_t> aipp_size;

  // Special condition where (no Crop and no Resize) or (no Crop and Resize with a fixed ratio) leads to dynamic input
  if ((resize_para.size() == 0 || resize_para.size() == 1) && crop_para.size() == 0) {
    aipp_size = {0, 0};
    MS_LOG(WARNING) << "Dynamic input shape is not supported, an incomplete aipp config file will be generated. "
                       "Please check your TensorTransform input; both src_image_size_h and src_image_size_w will be 0.";
    return aipp_size;
  }

  if (resize_para.size() == 0) {  // If only the Crop operator exists
    aipp_size = crop_para;
  } else if (crop_para.size() == 0) {  // If only the Resize operator with 2 parameters exists
    aipp_size = resize_para;
  } else {  // If both of them exist
    if (resize_para.size() == 1) {
      aipp_size = crop_para;
    } else {
      aipp_size =
        *min_element(resize_para.begin(), resize_para.end()) < *min_element(crop_para.begin(), crop_para.end())
          ? resize_para
          : crop_para;
    }
  }

#ifdef ENABLE_ACL
  aipp_size[0] = DVPP_ALIGN_UP(aipp_size[0], VPC_HEIGHT_ALIGN);  // H
  aipp_size[1] = DVPP_ALIGN_UP(aipp_size[1], VPC_WIDTH_ALIGN);   // W
#endif
  return aipp_size;
}
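
// Worked example of the selection rule above: with resize_para = {256, 256} and
// crop_para = {224, 224}, crop holds the smaller minimum element, so aipp_size
// becomes {224, 224}, which is then aligned up to the DVPP VPC height/width
// requirements when ACL is enabled.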

std::vector<uint32_t> AippMeanFilter(const std::vector<uint32_t> &normalize_para) {
  std::vector<uint32_t> aipp_mean;
  if (normalize_para.size() == 6) {  // If the Normalize operator exists
    std::transform(normalize_para.begin(), normalize_para.begin() + 3, std::back_inserter(aipp_mean),
                   [](uint32_t i) { return static_cast<uint32_t>(i / 10000); });
  } else {
    aipp_mean = {0, 0, 0};
  }
  return aipp_mean;
}

std::vector<float> AippStdFilter(const std::vector<uint32_t> &normalize_para) {
  std::vector<float> aipp_std;
  if (normalize_para.size() == 6) {  // If the Normalize operator exists
    auto zeros = std::find(std::begin(normalize_para), std::end(normalize_para), 0);
    if (zeros == std::end(normalize_para)) {
      if (std::any_of(normalize_para.begin() + 3, normalize_para.end(), [](uint32_t i) { return i == 0; })) {
        MS_LOG(ERROR) << "A value in the normalize parameters is 0.";
        return {};
      }
      std::transform(normalize_para.begin() + 3, normalize_para.end(), std::back_inserter(aipp_std),
                     [](uint32_t i) { return 10000 / static_cast<float>(i); });
    } else {  // If 0 occurs in the std vector
      MS_LOG(WARNING) << "Detected 0 in the std vector, please verify your input.";
      aipp_std = {1.0, 1.0, 1.0};
    }
  } else {
    aipp_std = {1.0, 1.0, 1.0};
  }
  return aipp_std;
}
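
// Worked example for the two filters above, assuming the IR serializes Normalize
// mean/std scaled by 10000 (as the arithmetic here implies): normalize_para =
// {1210000, 1220000, 1230000, 5000, 5000, 5000} yields aipp_mean = {121, 122, 123}
// and aipp_std = {2.0, 2.0, 2.0}, since var_reci = 10000 / 5000 = 2.0.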

Status AippInfoCollection(std::map<std::string, std::string> *aipp_options, const std::vector<uint32_t> &aipp_size,
                          const std::vector<uint32_t> &aipp_mean, const std::vector<float> &aipp_std) {
  // Several aipp config parameters
  aipp_options->insert(std::make_pair("related_input_rank", "0"));
  aipp_options->insert(std::make_pair("src_image_size_w", std::to_string(aipp_size[1])));
  aipp_options->insert(std::make_pair("src_image_size_h", std::to_string(aipp_size[0])));
  aipp_options->insert(std::make_pair("crop", "false"));
  aipp_options->insert(std::make_pair("input_format", "YUV420SP_U8"));
  aipp_options->insert(std::make_pair("aipp_mode", "static"));
  aipp_options->insert(std::make_pair("csc_switch", "true"));
  aipp_options->insert(std::make_pair("rbuv_swap_switch", "false"));
  // Y = AX + b, this part is A
  std::vector<int32_t> color_space_matrix = {256, 0, 359, 256, -88, -183, 256, 454, 0};
  int count = 0;
  for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 3; j++) {
      std::string key_word = "matrix_r" + std::to_string(i) + "c" + std::to_string(j);
      aipp_options->insert(std::make_pair(key_word, std::to_string(color_space_matrix[count])));
      ++count;
    }
  }
  // This part is b
  std::vector<uint32_t> color_space_bias = {0, 128, 128};
  for (int i = 0; i < 3; i++) {
    std::string key_word = "input_bias_" + std::to_string(i);
    aipp_options->insert(std::make_pair(key_word, std::to_string(color_space_bias[i])));
  }
  // Y = (X - mean - min) * [std^(-1)], this part is mean
  for (int i = 0; i < aipp_mean.size(); i++) {
    std::string key_word = "mean_chn_" + std::to_string(i);
    aipp_options->insert(std::make_pair(key_word, std::to_string(aipp_mean[i])));
  }
  // This part is min
  for (int i = 0; i < aipp_mean.size(); i++) {
    std::string key_word = "min_chn_" + std::to_string(i);
    aipp_options->insert(std::make_pair(key_word, "0.0"));
  }
  // This part is std^(-1)
  for (int i = 0; i < aipp_std.size(); i++) {
    std::string key_word = "var_reci_chn_" + std::to_string(i);
    aipp_options->insert(std::make_pair(key_word, std::to_string(aipp_std[i])));
  }
  return Status::OK();
}
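
// With aipp_size = {224, 224}, the options collected above are later serialized by
// AippCfgGenerator into a file shaped like the following excerpt (illustrative):
//   aipp_op {
//       aipp_mode : static
//       input_format : YUV420SP_U8
//       src_image_size_h : 224
//       src_image_size_w : 224
//       ...
//   }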

std::string Execute::AippCfgGenerator() {
  std::string config_location = "./aipp.cfg";
  if (info_ == nullptr) {
    MS_LOG(ERROR) << "info_ is null";
    return "";
  }
#ifdef ENABLE_ACL
  if (info_->init_with_shared_ptr_) {
    auto rc = ParseTransforms();
    if (rc.IsError()) {
      MS_LOG(ERROR) << "Parse transforms failed, error msg is " << rc;
      return "";
    }
    info_->init_with_shared_ptr_ = false;
  }
  std::vector<uint32_t> paras;  // Record the parameter values of each Ascend operator
  for (int32_t i = 0; i < ops_.size(); i++) {
    // Validate operator ir
    json ir_info;
    if (ops_[i] == nullptr) {
      MS_LOG(ERROR) << "Input TensorOperation[" + std::to_string(i) + "] is null.";
      return "";
    }

    // Define map between operator name and parameter name
    auto rc = ops_[i]->to_json(&ir_info);
    if (rc.IsError()) {
      MS_LOG(ERROR) << "IR information serialize to json failed, error msg is " << rc;
      return "";
    }

    // Collect the information of operators
    for (auto pos = info_->op2para_map_.equal_range(ops_[i]->Name()); pos.first != pos.second; ++pos.first) {
      auto paras_key_word = pos.first->second;
      paras = ir_info[paras_key_word].get<std::vector<uint32_t>>();
      info_->aipp_cfg_.insert(std::make_pair(ops_[i]->Name(), paras));
    }
  }

  std::ofstream outfile;
  outfile.open(config_location, std::ofstream::out);

  if (!outfile.is_open()) {
    MS_LOG(ERROR) << "Failed to open the Aipp config file, please verify your system config (including permissions). "
                  << "An empty string, which represents the location of the Aipp config file, is returned in this case.";
    return "";
  }

  if (device_type_ == MapTargetDevice::kAscend310) {
    // Process resize parameters and crop parameters to find out the final size of input data
    std::vector<uint32_t> resize_paras;
    std::vector<uint32_t> crop_paras;

    // Find resize parameters
    std::multimap<std::string, std::vector<uint32_t>>::iterator iter;
    if (info_->aipp_cfg_.find(vision::kDvppResizeJpegOperation) != info_->aipp_cfg_.end()) {
      iter = info_->aipp_cfg_.find(vision::kDvppResizeJpegOperation);
      resize_paras = iter->second;
    } else if (info_->aipp_cfg_.find(vision::kDvppDecodeResizeOperation) != info_->aipp_cfg_.end()) {
      iter = info_->aipp_cfg_.find(vision::kDvppDecodeResizeOperation);
      resize_paras = iter->second;
    }

    // Find crop parameters
    if (info_->aipp_cfg_.find(vision::kDvppCropJpegOperation) != info_->aipp_cfg_.end()) {
      iter = info_->aipp_cfg_.find(vision::kDvppCropJpegOperation);
      crop_paras = iter->second;
    } else if (info_->aipp_cfg_.find(vision::kDvppDecodeResizeCropOperation) != info_->aipp_cfg_.end()) {
      iter = info_->aipp_cfg_.find(vision::kDvppDecodeResizeCropOperation);
      crop_paras = iter->second;
    }
    if (crop_paras.size() == 1) {
      crop_paras.emplace_back(crop_paras[0]);
    }

    std::vector<uint32_t> aipp_size = AippSizeFilter(resize_paras, crop_paras);

    // Process Normalization parameters to find out the final Normalization parameters for the Aipp module
    std::vector<uint32_t> normalize_paras;
    if (info_->aipp_cfg_.find(vision::kDvppNormalizeOperation) != info_->aipp_cfg_.end()) {
      for (auto pos = info_->aipp_cfg_.equal_range(vision::kDvppNormalizeOperation); pos.first != pos.second;
           ++pos.first) {
        auto mean_or_std = pos.first->second;
        normalize_paras.insert(normalize_paras.end(), mean_or_std.begin(), mean_or_std.end());
      }
    }

    std::vector<uint32_t> aipp_mean = AippMeanFilter(normalize_paras);
    std::vector<float> aipp_std = AippStdFilter(normalize_paras);

    std::map<std::string, std::string> aipp_options;
    auto rc = AippInfoCollection(&aipp_options, aipp_size, aipp_mean, aipp_std);
    if (rc.IsError()) {
      MS_LOG(ERROR) << "Aipp information initialization failed, error msg is " << rc;
      return "";
    }

    std::string tab_char(4, ' ');
    outfile << "aipp_op {" << std::endl;
    for (auto &option : aipp_options) {
      outfile << tab_char << option.first << " : " << option.second << std::endl;
    }
    outfile << "}";
    outfile.close();
  } else {  // For case GPU or CPU
    outfile << "aipp_op {" << std::endl << "}";
    outfile.close();
    MS_LOG(WARNING) << "Your runtime environment is not Ascend310; this config file may lead to undefined behavior in "
                       "the computation results. Please check it.";
  }
#endif
  return config_location;
}
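
// Illustrative call sketch (the DVPP transform list is hypothetical):
//   Execute transform(dvpp_ops, MapTargetDevice::kAscend310);
//   std::string cfg_path = transform.AippCfgGenerator();  // "./aipp.cfg" on success, "" on failure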

bool IsEmptyPtr(std::shared_ptr<TensorTransform> api_ptr) { return api_ptr == nullptr; }

Status Execute::ParseTransforms() {
  auto iter = std::find_if(transforms_.begin(), transforms_.end(), IsEmptyPtr);
  if (iter != transforms_.end()) {
    std::string err_msg = "Your input TensorTransforms contain at least one nullptr, please check your input.";
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_UNEXPECTED(err_msg);
  }

  if (device_type_ == MapTargetDevice::kCpu) {
    (void)std::transform(transforms_.begin(), transforms_.end(), std::back_inserter(ops_),
                         [](std::shared_ptr<TensorTransform> operation) -> std::shared_ptr<TensorOperation> {
                           return operation->Parse();
                         });
  } else if (device_type_ == MapTargetDevice::kAscend310) {
    for (auto &transform_ : transforms_) {
      ops_.emplace_back(transform_->Parse(device_type_));
    }
  } else {
    std::string err_msg = "Your input device is not supported. (Option: CPU or Ascend310)";
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_UNEXPECTED(err_msg);
  }

  return Status::OK();
}

Status Execute::ValidateDevice() {
  if (device_type_ != MapTargetDevice::kCpu && device_type_ != MapTargetDevice::kAscend310 &&
      device_type_ != MapTargetDevice::kGpu) {
    std::string err_msg = "Your input device is not supported. (Option: CPU or GPU or Ascend310).";
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_UNEXPECTED(err_msg);
  }
  return Status::OK();
}

Status Execute::DeviceMemoryRelease() {
  CHECK_FAIL_RETURN_UNEXPECTED(device_resource_, "Device resource is nullptr, which is illegal under case Ascend310.");
  Status rc = device_resource_->DeviceDataRelease();
  if (rc.IsError()) {
    std::string err_msg = "Error in device data release";
    MS_LOG(ERROR) << err_msg;
    RETURN_STATUS_UNEXPECTED(err_msg);
  }
  return Status::OK();
}

Status Execute::Run(const std::vector<std::shared_ptr<dataset::Execute>> &data_graph,
                    const std::vector<mindspore::MSTensor> &inputs, std::vector<mindspore::MSTensor> *outputs) {
  std::vector<MSTensor> transform_inputs = inputs;
  std::vector<MSTensor> transform_outputs;
  if (!data_graph.empty()) {
    for (auto &exes : data_graph) {
      CHECK_FAIL_RETURN_UNEXPECTED(exes != nullptr, "Given execute object is null.");
      Status ret = exes->operator()(transform_inputs, &transform_outputs);
      if (ret != kSuccess) {
        MS_LOG(ERROR) << "Run preprocess failed:" << ret.GetErrDescription();
        return ret;
      }
      MS_LOG(DEBUG) << "transform_outputs[0].Shape: " << transform_outputs[0].Shape();
      transform_inputs = transform_outputs;
    }
    *outputs = std::move(transform_outputs);
  } else {
    std::string msg = "The set of Executors can not be empty.";
    MS_LOG(ERROR) << msg;
    RETURN_STATUS_UNEXPECTED(msg);
  }
  return Status::OK();
}
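
// Illustrative chaining sketch for Execute::Run (executor names hypothetical):
//   std::vector<std::shared_ptr<dataset::Execute>> graph = {decode_exec, resize_exec};
//   std::vector<mindspore::MSTensor> outs;
//   Status rc = Execute::Run(graph, {raw_image}, &outs);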

// In the current stage, there is a cyclic dependency between libmindspore.so and c_dataengine.so,
// so we define a C function here that libmindspore.so dlopens to avoid linking it explicitly;
// this will be fixed after decoupling libmindspore.so into multiple submodules
extern "C" {
// ExecuteRun_C has C linkage, and C linkage cannot return the user-defined type 'mindspore::Status',
// so the status is reported through the pointer parameter s instead
void ExecuteRun_C(const std::vector<std::shared_ptr<dataset::Execute>> &data_graph,
                  std::vector<mindspore::MSTensor> &inputs, std::vector<mindspore::MSTensor> *outputs, Status *s) {
  Status ret = Execute::Run(data_graph, inputs, outputs);
  *s = Status(ret);
}
}

}  // namespace dataset
}  // namespace mindspore