/**
 * Copyright 2023 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "tests/ut/cpp/common/device_common_test.h"

#include "mindspore/core/ops/comparison_ops.h"
#include "mindspore/core/ops/framework_ops.h"
#include "mindspore/core/ops/math_ops.h"

namespace mindspore {
namespace runtime {
using namespace test;
class AnyTypeKernelActorTest : public UT::Common {
 public:
  AnyTypeKernelActorTest() {}
};

namespace {
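// Builds a front-end FuncGraph and a matching backend KernelGraph for x + y,
// where parameter y and the Add output carry an AbstractAny (dynamic) abstract.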
std::pair<FuncGraphPtr, KernelGraphPtr> BuildAnyTypeGraph() {
  std::vector<int64_t> shp{2, 2};
  auto func_graph = std::make_shared<FuncGraph>();
  auto abstract_x = std::make_shared<abstract::AbstractTensor>(kFloat64, shp);
  auto parameter_x = func_graph->add_parameter();
  parameter_x->set_abstract(abstract_x);

  auto abstract_y = std::make_shared<abstract::AbstractAny>();
  auto parameter_y = func_graph->add_parameter();
  parameter_y->set_abstract(abstract_y);

  // Add.
  std::vector<AnfNodePtr> add_inputs{NewValueNode(prim::kPrimAdd), parameter_x, parameter_y};
  auto add_node = func_graph->NewCNode(add_inputs);
  auto add_abs = std::make_shared<abstract::AbstractAny>();
  add_node->set_abstract(add_abs);

  // Return.
  std::vector<AnfNodePtr> return_inputs{NewValueNode(prim::kPrimReturn), add_node};
  auto return_node = func_graph->NewCNode(return_inputs);
  auto return_abs = std::make_shared<abstract::AbstractAny>();
  return_node->set_abstract(return_abs);
  func_graph->set_return(return_node);

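  // Backend kernel graph mirroring the front-end graph above.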
  auto kernel_graph = std::make_shared<KernelGraph>();
  auto backend_abstract_x = std::make_shared<abstract::AbstractTensor>(kFloat64, shp);
  auto backend_parameter_x = kernel_graph->add_parameter();
  backend_parameter_x->set_abstract(backend_abstract_x);

  auto backend_abstract_y = std::make_shared<abstract::AbstractAny>();
  auto backend_parameter_y = kernel_graph->add_parameter();
  backend_parameter_y->set_abstract(backend_abstract_y);

  // Add.
  std::vector<AnfNodePtr> backend_add_inputs{NewValueNode(prim::kPrimAdd), backend_parameter_x, backend_parameter_y};
  auto backend_add_node = kernel_graph->NewCNode(backend_add_inputs);
  auto backend_add_abs = std::make_shared<abstract::AbstractAny>();
  backend_add_node->set_abstract(backend_add_abs);

  // Return.
  std::vector<AnfNodePtr> backend_return_inputs{NewValueNode(prim::kPrimReturn), backend_add_node};
  auto backend_return_node = kernel_graph->NewCNode(backend_return_inputs);
  auto backend_return_abs = std::make_shared<abstract::AbstractAny>();
  backend_return_node->set_abstract(backend_return_abs);
  kernel_graph->set_return(backend_return_node);
  kernel_graph->set_execution_order({backend_add_node});
  kernel_graph->input_nodes_.emplace_back(backend_parameter_x);
  kernel_graph->input_nodes_.emplace_back(backend_parameter_y);

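  // Link the backend nodes back to their front-end counterparts.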
  kernel_graph->CacheGraphOutputToFrontNodeWithIndex({kernel_graph->output()}, {func_graph->output()});
  kernel_graph->FrontBackendMapAdd(add_node, backend_add_node);
  kernel_graph->FrontBackendMapAdd(parameter_x, backend_parameter_x);
  kernel_graph->FrontBackendMapAdd(parameter_y, backend_parameter_y);

  return std::make_pair(func_graph, kernel_graph);
}
}  // namespace

/// Feature: Pyexecute any type output.
/// Description: Test RunOpData of the any type kernel actor.
/// Expectation: The input op data is cached by the actor as expected.
TEST_F(AnyTypeKernelActorTest, RunOpData) {
  const char device_name[] = "CPU";
  uint32_t device_id = 0;

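  // Save the current context parameters so they can be restored at the end of the test.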
  auto ms_context = MsContext::GetInstance();
  int last_execution_mode = ms_context->get_param<int>(MS_CTX_EXECUTION_MODE);
  bool last_enable_mindrt = ms_context->get_param<bool>(MS_CTX_ENABLE_MINDRT);
  uint32_t last_device_id = ms_context->get_param<uint32_t>(MS_CTX_DEVICE_ID);
  std::string last_device_target = ms_context->get_param<std::string>(MS_CTX_DEVICE_TARGET);

  ms_context->set_param<int>(MS_CTX_EXECUTION_MODE, kGraphMode);
  ms_context->set_param<bool>(MS_CTX_ENABLE_MINDRT, true);
  ms_context->set_param<uint32_t>(MS_CTX_DEVICE_ID, device_id);
  ms_context->set_param<std::string>(MS_CTX_DEVICE_TARGET, device_name);
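  // Register the test device context under the CPU target.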
  MS_REGISTER_DEVICE(device_name, TestDeviceContext);
  DeviceContextKey device_context_key{device_name, device_id};
  auto device_context = std::make_shared<TestDeviceContext>(device_context_key);

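  // Build the graphs and create the actor under test, wired to the memory manager actor.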
  auto graph_pair = BuildAnyTypeGraph();
  auto func_graph = graph_pair.first;
  auto kernel_graph = graph_pair.second;
  auto &memory_manager_actor = MemoryManagerActor::GetInstance();
  const auto &any_type_kernel_actor =
    std::make_shared<AnyTypeKernelActor>(kernel_graph->ToString() + "_AnyTypeKernelActor", kernel_graph,
                                         device_context.get(), memory_manager_actor->GetAID(), nullptr, nullptr);

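  // Host-side buffers for the two inputs.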
  using DataType = float;
  DataType input_0 = 2.0;
  DataType input_1 = 2.0;
  ShapeVector shape = {1};

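  // Minimal op context identified by a sequential number, with one promise for the result.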
  OpContext<DeviceAddress> op_context;
  std::vector<Promise<int>> result(1);
  op_context.sequential_num_ = 140429;
  op_context.results_ = &result;

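  // Wrap each host buffer in a kernel tensor and a device address, then
  // package it as op data addressed to the actor (input indexes 0 and 1).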
  auto kernel_tensor0 = std::make_shared<kernel::KernelTensor>(
    &input_0, sizeof(DataType), Format::DEFAULT_FORMAT, TypeId::kNumberTypeFloat32, shape,
    device_context->device_context_key().device_name_, device_context->device_context_key().device_id_);
  auto device_address0 = device_context->device_res_manager_->CreateDeviceAddress(kernel_tensor0);
  auto op_data0 = std::make_shared<OpData<DeviceTensor>>(any_type_kernel_actor->GetAID(), device_address0.get(), 0);

  auto kernel_tensor1 = std::make_shared<kernel::KernelTensor>(
    &input_1, sizeof(DataType), Format::DEFAULT_FORMAT, TypeId::kNumberTypeFloat32, shape,
    device_context->device_context_key().device_name_, device_context->device_context_key().device_id_);
  auto device_address1 = device_context->device_res_manager_->CreateDeviceAddress(kernel_tensor1);
  auto op_data1 = std::make_shared<OpData<DeviceTensor>>(any_type_kernel_actor->GetAID(), device_address1.get(), 1);

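  // The actor expects two inputs, and input index 1 is the any type parameter.
  // Send only that input.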
  any_type_kernel_actor->input_datas_num_ = 2;
  any_type_kernel_actor->any_type_parameter_indexes_.emplace_back(1);
  any_type_kernel_actor->RunOpData(op_data1.get(), &op_context);

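  // Only one of the two expected inputs has arrived, so exactly one op data
  // should be cached under this sequential number.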
  ASSERT_EQ(any_type_kernel_actor->input_op_datas_[op_context.sequential_num_].size(), 1);

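  // Restore the saved context parameters.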
  ms_context->set_param<int>(MS_CTX_EXECUTION_MODE, last_execution_mode);
  ms_context->set_param<bool>(MS_CTX_ENABLE_MINDRT, last_enable_mindrt);
  ms_context->set_param<uint32_t>(MS_CTX_DEVICE_ID, last_device_id);
  ms_context->set_param<std::string>(MS_CTX_DEVICE_TARGET, last_device_target);
}
}  // namespace runtime
}  // namespace mindspore