/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "minddata/dataset/util/circular_pool.h"
#include "minddata/dataset/core/client.h"
#include "minddata/dataset/engine/jagged_connector.h"
#include "common/common.h"
#include "gtest/gtest.h"
#include "utils/log_adapter.h"

using namespace mindspore::dataset;
using mindspore::LogStream;
using mindspore::ExceptionType::NoExceptionType;
using mindspore::MsLogLevel::INFO;

class MindDataTestSkipOp : public UT::DatasetOpTesting {};

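// Basic SkipOp pipeline test: TFReaderOp -> SkipOp(5). Skipping the first 5 rows of
// the testTFTestAllTypes dataset should leave 7 rows for the iterator (5 skipped +
// 7 remaining = 12 source rows), which is what the final ASSERT_EQ verifies.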
TEST_F(MindDataTestSkipOp, TestSkipOpFuntions) {
  // Start with an empty execution tree
  auto my_tree = std::make_shared<ExecutionTree>();
  Status rc;
  std::string dataset_path = datasets_root_path_ + "/testTFTestAllTypes/test.data";

  std::shared_ptr<ConfigManager> config_manager = GlobalContext::config_manager();
  int32_t op_connector_size = config_manager->op_connector_size();
  int32_t num_workers = config_manager->num_parallel_workers();
  int32_t worker_connector_size = 16;
  std::unique_ptr<DataSchema> schema = std::make_unique<DataSchema>();
  rc = schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {});
  ASSERT_TRUE(rc.IsOk());
  std::vector<std::string> columns_to_load = {};
  std::vector<std::string> files = {dataset_path};
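  // Leaf op: a TFReaderOp that reads test.data with the schema loaded above; the
  // remaining numeric/boolean constructor arguments are the shuffle/shard-related
  // settings for this TFReaderOp version and are left untouched here.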
  std::shared_ptr<TFReaderOp> my_tfreader_op =
    std::make_shared<TFReaderOp>(num_workers, worker_connector_size, 0, files, std::move(schema), op_connector_size,
                                 columns_to_load, false, 1, 0, false);
  rc = my_tfreader_op->Init();
  ASSERT_TRUE(rc.IsOk());
  rc = my_tree->AssociateNode(my_tfreader_op);
  ASSERT_TRUE(rc.IsOk());

  // SkipOp: drop the first 5 rows produced by its child op.
  std::shared_ptr<SkipOp> skip_op = std::make_shared<SkipOp>(5);
  rc = my_tree->AssociateNode(skip_op);
  ASSERT_TRUE(rc.IsOk());
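  // AssociateNode only registers an op with the tree; the parent/child links that
  // define the dataflow are wired up next.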

  // Set children/root layout: SkipOp is the root, with the TFReaderOp as its child.
  rc = skip_op->AddChild(my_tfreader_op);
  ASSERT_TRUE(rc.IsOk());
  rc = my_tree->AssignRoot(skip_op);
  ASSERT_TRUE(rc.IsOk());

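  // Prepare finalizes the tree for execution and Launch starts the operators' worker
  // threads; rows only start flowing through the connectors after both succeed.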
  MS_LOG(INFO) << "Launching tree and beginning iteration.";
  rc = my_tree->Prepare();
  ASSERT_TRUE(rc.IsOk());

  rc = my_tree->Launch();
  ASSERT_TRUE(rc.IsOk());

  // Start the loop of reading tensors from our pipeline
  DatasetIterator di(my_tree);
  TensorRow tensor_list;
  rc = di.FetchNextTensorRow(&tensor_list);
  ASSERT_TRUE(rc.IsOk());

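  // FetchNextTensorRow returns an empty TensorRow once the pipeline is exhausted,
  // which is the condition that ends the while loop below.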
  int row_count = 0;
  while (!tensor_list.empty()) {
    MS_LOG(INFO) << "Row display for row #: " << row_count << ".";

    // Display each tensor in the row by calling its printer
    for (size_t i = 0; i < tensor_list.size(); i++) {
      std::ostringstream ss;
      ss << "(" << tensor_list[i] << "): " << *tensor_list[i] << std::endl;
      MS_LOG(INFO) << "Tensor print: " << ss.str() << ".";
    }

    rc = di.FetchNextTensorRow(&tensor_list);
    ASSERT_TRUE(rc.IsOk());
    row_count++;
  }

  ASSERT_EQ(row_count, 7);
}