/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <iostream>
#include <memory>
#include "common/common_test.h"
#include "pipeline/jit/parse/python_adapter.h"
#include "pipeline/jit/parse/data_converter.h"
#include "frontend/operator/ops.h"
#include "pipeline/pynative/pynative_execute.h"
#include "utils/ms_context.h"
#include "utils/utils.h"

namespace py = pybind11;
using pybind11::literals::operator"" _a;
using Tensor = mindspore::tensor::Tensor;
using TensorPtr = mindspore::tensor::TensorPtr;

namespace mindspore {
namespace pynative {
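// Test fixture for PyNative (eager-mode) executor unit tests.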
class TestPynativeExecute : public UT::Common {
 public:
  TestPynativeExecute() {}
};

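// Converts a Python object into a MindSpore ValuePtr; raises an exception if the conversion fails.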
inline ValuePtr PyAttrValue(const py::object &obj) {
  ValuePtr converted_ret;
  bool converted = parse::ConvertData(obj, &converted_ret);
  if (!converted) {
    MS_LOG(EXCEPTION) << "Attribute convert error with type: " << std::string(py::str(obj));
  }
  return converted_ret;
}

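// Builds an OpExecInfo for a Conv2D op: an all-ones 1x3x6x6 input and an all-ones 64x3x3x3 weight
// in NCHW format, passed through the PyNative forward executor.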
OpExecInfoPtr ConstructOpExecInfo() {
  py::str op_name = "Conv2D";
  py::object tensor_py_module = py::module::import("mindspore.common.tensor").attr("Tensor");
  py::object np_py_module = py::module::import("numpy");
  py::object np_ones = np_py_module.attr("ones");
  py::object np_float32 = np_py_module.attr("float32");
  py::tuple weight_dim = py::make_tuple(64, 3, 3, 3);
  py::object weight = tensor_py_module(np_float32(np_ones(weight_dim)));
  py::tuple op_params = py::make_tuple(weight);
  py::tuple inputs_dim = py::make_tuple(1, 3, 6, 6);
  py::object input = tensor_py_module(np_float32(np_ones(inputs_dim)));
  py::tuple op_inputs = py::make_tuple(input, weight);

  py::tuple kernel_size = py::make_tuple(3, 3);
  py::dict op_attrs = py::dict("out_channel"_a = 64, "kernel_size"_a = kernel_size, "mode"_a = 1, "pad_mode"_a = "same",
                               "stride"_a = 1, "dilation"_a = 1, "group"_a = 1, "data_format"_a = kOpFormat_NCHW);

  auto conv_obj = prim::GetPythonOps("conv2d_prim", "gtest_input.pynative");
  py::none py_none;
  py::args args = py::make_tuple(conv_obj, op_name, op_inputs);
  py::list args_input = args[PY_INPUTS];
  return PynativeExecutor::GetInstance()->forward_executor()->GenerateOpExecInfo(args);
}

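// MsContext::GetInstance() returns a process-wide singleton: parameter updates made through one
// handle must be visible through any other handle.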
TEST_F(TestPynativeExecute, TestCreateContext) {
  auto ctx3 = MsContext::GetInstance();
  ASSERT_EQ(ctx3->backend_policy(), "vm");
  ASSERT_EQ(ctx3->get_param<std::string>(MS_CTX_DEVICE_TARGET), "CPU");

  ctx3->set_backend_policy("ge_only");
  ctx3->set_param<std::string>(MS_CTX_DEVICE_TARGET, "GPU");
  auto ctx4 = MsContext::GetInstance();

  ASSERT_EQ(ctx3.get(), ctx4.get());
  ASSERT_EQ(ctx4->backend_policy(), "ge_only");
  ASSERT_EQ(ctx4->get_param<std::string>(MS_CTX_DEVICE_TARGET), "GPU");
}

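// Checks that repeated GetInstance() calls return the same context object; the expected backend
// policy ("ge_only") reflects the change made in TestCreateContext above.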
TEST_F(TestPynativeExecute, TestDefaultContext) {
  auto ctx = MsContext::GetInstance();

  ASSERT_EQ(std::string(ctx->backend_policy()), "ge_only");

  auto ctx2 = MsContext::GetInstance();

  ASSERT_EQ(ctx.get(), ctx2.get());
}

}  // namespace pynative
}  // namespace mindspore