/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <iostream>
#include <memory>
#include <vector>

#include "common/common.h"
#include "utils/ms_utils.h"
#include "minddata/dataset/core/client.h"
#include "minddata/dataset/engine/jagged_connector.h"
#include "gtest/gtest.h"
#include "utils/log_adapter.h"

namespace common = mindspore::common;

using namespace mindspore::dataset;
using mindspore::LogStream;
using mindspore::ExceptionType::NoExceptionType;
using mindspore::MsLogLevel::INFO;

class MindDataTestTakeOp : public UT::DatasetOpTesting {};

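// Builds a two-op tree (a TFReaderOp feeding a TakeOp with a count of 5), iterates it,
// and verifies that the pipeline produces exactly 5 rows.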
TEST_F(MindDataTestTakeOp, TestTakeProject) {
  // Start with an empty execution tree
  auto my_tree = std::make_shared<ExecutionTree>();
  Status rc;
  std::string dataset_path = datasets_root_path_ + "/testTFTestAllTypes/test.data";

  // TFReaderOp: leaf op that reads the TFRecord file using the schema loaded below
  std::unique_ptr<DataSchema> schema = std::make_unique<DataSchema>();
  rc = schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {});
  ASSERT_OK(rc);
  std::shared_ptr<ConfigManager> config_manager = GlobalContext::config_manager();
  auto num_workers = 1;
  auto op_connector_size = config_manager->op_connector_size();
  std::vector<std::string> columns_to_load = {};
  std::vector<std::string> files = {dataset_path};
  // worker connector size = 16
  std::shared_ptr<TFReaderOp> my_tfreader_op = std::make_shared<TFReaderOp>(
    num_workers, 16, 0, files, std::move(schema), op_connector_size, columns_to_load, false, 1, 0, false);
  rc = my_tfreader_op->Init();
  ASSERT_OK(rc);
  // TakeOp: pass through only the first 5 rows from its child
  std::shared_ptr<TakeOp> my_take_op = std::make_shared<TakeOp>(5);

  rc = my_tree->AssociateNode(my_tfreader_op);
  ASSERT_OK(rc);
  rc = my_tree->AssociateNode(my_take_op);
  ASSERT_OK(rc);

  // Set children/root layout.
  rc = my_take_op->AddChild(my_tfreader_op);
  ASSERT_OK(rc);
  rc = my_tree->AssignRoot(my_take_op);
  ASSERT_OK(rc);
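  // The tree is now: TFReaderOp (leaf) -> TakeOp (root).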

  MS_LOG(DEBUG) << "Launching tree and beginning iteration.";
  rc = my_tree->Prepare();
  ASSERT_OK(rc);

  rc = my_tree->Launch();
  ASSERT_OK(rc);

  // Start the loop of reading tensors from our pipeline
  DatasetIterator di(my_tree);
  TensorRow tensor_list;
  rc = di.FetchNextTensorRow(&tensor_list);
  ASSERT_OK(rc);

  int row_count = 0;
  while (!tensor_list.empty()) {
    MS_LOG(DEBUG) << "Row display for row #: " << row_count << ".";

    // Display the tensor by calling the printer on it
    for (size_t i = 0; i < tensor_list.size(); i++) {
      std::ostringstream ss;
      ss << "(" << tensor_list[i] << "): " << *tensor_list[i] << std::endl;
      MS_LOG(DEBUG) << "Tensor print: " << ss.str() << ".";
    }

    rc = di.FetchNextTensorRow(&tensor_list);
    ASSERT_OK(rc);
    row_count++;
  }

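  // TakeOp was constructed with a count of 5, so exactly 5 rows should have been produced.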
  ASSERT_EQ(row_count, 5);
}