/**
 * Copyright 2022-2023 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// NOTICE: This header file should only be included once in the whole project.
// The original .cpp file was converted to a header file to avoid an MSVC compiler problem.
#ifndef MINDSPORE_CCSRC_PYBINDAPI_IR_PY_EXECUTE_PY_H_
#define MINDSPORE_CCSRC_PYBINDAPI_IR_PY_EXECUTE_PY_H_

#include <algorithm>
#include <functional>
#include <vector>
#include <string>
#include <memory>
#include <utility>
#include "pybind11/pybind11.h"
#include "pybind_api/pybind_patch.h"

#include "include/common/fallback.h"
#include "mindspore/core/ops/py_execute.h"
#include "mindspore/ccsrc/include/common/utils/convert_utils_py.h"
#include "mindspore/ccsrc/include/common/utils/python_utils.h"
#include "mindspore/ccsrc/include/common/utils/python_adapter.h"
#include "mindspore/ccsrc/include/common/utils/python_fallback_running.h"
#include "mindspore/ccsrc/include/backend/optimizer/helper.h"
#include "mindspore/ccsrc/pipeline/jit/ps/parse/data_converter.h"
#include "mindspore/ccsrc/pybind_api/ir/tensor_py.h"
#include "mindspore/ccsrc/plugin/device/cpu/kernel/pyexecute/py_execute_cpu_kernel.h"
#include "mindspore/ccsrc/pipeline/jit/ps/parse/resolve.h"

namespace py = pybind11;
namespace mindspore {
namespace abstract {
using PyObjectWrapperPtr = std::shared_ptr<parse::PyObjectWrapper>;
namespace pyexecute_user_data_catcher {
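// Fetch the python object attached to an abstract as PyExecuteOutputUserData, if any.
// Returns {true, wrapped python object} when the user data exists, otherwise {false, nullptr}.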
std::pair<bool, ValuePtr> PyExecuteUserDataCatcher(const AbstractBasePtr &element_abs) {
  MS_EXCEPTION_IF_NULL(element_abs);
  if (element_abs->has_user_data<kernel::PyExecuteOutputUserData>()) {
    const auto &data = element_abs->user_data<kernel::PyExecuteOutputUserData>();
    MS_EXCEPTION_IF_NULL(data);
    auto python_obj = std::make_shared<parse::PyObjectWrapper>(data->obj, "graph python obj");
    return {true, python_obj};
  }
  return {false, nullptr};
}

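// Register PyExecuteUserDataCatcher into AbstractBase at static-initialization time, so that
// abstracts can expose the python object captured by PyExecute kernels.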
struct PyExecuteUserDataCatcherRegister {
  PyExecuteUserDataCatcherRegister() noexcept {
    abstract::AbstractBase::set_pyexecute_user_data_catcher(
      [](const AbstractBasePtr &element_abs) { return PyExecuteUserDataCatcher(element_abs); });
  }
  ~PyExecuteUserDataCatcherRegister() {}
} pyexecute_user_data_catcher_register;
}  // namespace pyexecute_user_data_catcher
}  // namespace abstract

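// Recursively check whether a python object, or any element of a nested list/tuple/dict,
// is a stub tensor.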
bool ContainStubTensor(const py::object &obj) {
  if (py::isinstance<py::list>(obj)) {
    auto list_obj = py::cast<py::list>(obj);
    return std::any_of(list_obj.begin(), list_obj.end(),
                       [](const auto &e) { return ContainStubTensor(py::cast<py::object>(e)); });
  }
  if (py::isinstance<py::tuple>(obj)) {
    auto tuple_obj = py::cast<py::tuple>(obj);
    return std::any_of(tuple_obj.begin(), tuple_obj.end(),
                       [](const auto &e) { return ContainStubTensor(py::cast<py::object>(e)); });
  }
  if (py::isinstance<py::dict>(obj)) {
    auto dict_obj = py::cast<py::dict>(obj);
    return std::any_of(dict_obj.begin(), dict_obj.end(), [](const auto &e) {
      return ContainStubTensor(py::cast<py::object>(e.first)) || ContainStubTensor(py::cast<py::object>(e.second));
    });
  }
  return IsStubTensor(obj);
}

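// Register the C++ infer handler for the PyExecute primitive at static-initialization time,
// for both frontend infer and backend launch.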
class PyExecuteInitializer {
 public:
  PyExecuteInitializer() {
    mindspore::ops::PyExecuteInfer::set_infer_handler(CppInferShapeAndTypePy);
    mindspore::opt::set_launch_handler(CppInferShapeAndTypePy);
  }

  ~PyExecuteInitializer() = default;

 private:
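  // Convert a kernel tensor (passed in as an abstract) into a Value: prefer the attached
  // python object user data, then the value track, then an empty tensor for an empty shape,
  // and finally copy the kernel tensor data into a host scalar or tensor.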
  static ValuePtr GetValueByAbstract(const abstract::AbstractBase *abstract) {
    MS_EXCEPTION_IF_NULL(abstract);
    if (!abstract->isa<kernel::KernelTensor>()) {
      MS_LOG(EXCEPTION) << "Invalid kernel tensor:" << abstract->ToString();
    }
    const auto &kernel_tensor = dynamic_cast<const kernel::KernelTensor *>(abstract);
    MS_EXCEPTION_IF_NULL(kernel_tensor);
    if (kernel_tensor->user_data() != nullptr) {
      return std::make_shared<parse::PyObjectWrapper>(
        kernel_tensor->user_data()->get<kernel::PyExecuteOutputUserData>(kernel::PyExecuteOutputUserData::key)->obj,
        "graph python obj");
    }

    if (kernel_tensor->GetValueTrack() != nullptr && !kernel_tensor->GetValueTrack()->isa<ValueAny>()) {
      return kernel_tensor->GetValueTrack();
    } else if (IsShapeEmpty(kernel_tensor->GetShapeVector())) {
      auto type_id =
        (kernel_tensor->dtype_id() == TypeId::kTypeUnknown ? TypeId::kNumberTypeInt64 : kernel_tensor->dtype_id());
      return std::make_shared<tensor::Tensor>(type_id, kernel_tensor->GetShapeVector());
    }

    MS_LOG(DEBUG) << "Type:" << kernel_tensor->dtype_id() << " shape:" << kernel_tensor->GetShapeVector()
                  << " size:" << kernel_tensor->size();
    auto real_value = kernel_tensor->GetValue();
    MS_EXCEPTION_IF_NULL(real_value);
    if (!real_value->isa<KernelTensorValue>()) {
      MS_LOG(EXCEPTION) << "Invalid kernel tensor value:" << real_value->ToString();
    }

    auto kernel_tensor_value = real_value->cast<KernelTensorValuePtr>();
    MS_EXCEPTION_IF_NULL(kernel_tensor_value);
    if (kernel_tensor->GetType() != nullptr && kernel_tensor->GetType()->isa<Number>()) {
      return common::AnfAlgo::ValueToScalar(kernel_tensor_value, kernel_tensor->GetType()->type_id());
    }

    tensor::TensorPtr tensor =
      std::make_shared<tensor::Tensor>(kernel_tensor->dtype_id(), kernel_tensor->GetShapeVector());
    MS_EXCEPTION_IF_NULL(tensor);
    if (LongToSize(tensor->data().nbytes()) != kernel_tensor_value->GetDataSize()) {
      MS_LOG(EXCEPTION) << "Invalid host tensor size:" << tensor->data().nbytes()
                        << " and kernel tensor size:" << kernel_tensor_value->GetDataSize() << " for pyexecute.";
    }
    auto data_ptr = tensor->data_c();
    MS_EXCEPTION_IF_NULL(data_ptr);
    const auto &res = memcpy_s(data_ptr, kernel_tensor_value->GetDataSize(), kernel_tensor_value->GetDataPtr(),
                               kernel_tensor_value->GetDataSize());
    if (res != EOK) {
      MS_LOG(EXCEPTION) << "memcpy failed. res: " << res << ", for tensor:" << tensor->ToString()
                        << " size:" << kernel_tensor_value->GetDataSize();
    }
    return tensor;
  }

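  // Build a nested empty ValueTuple that mirrors the given structural attr entry
  // (the entry must itself contain only tuples).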
  static ValuePtr ConstructEmptyTupleValue(const ValuePtr &structural) {
    MS_EXCEPTION_IF_NULL(structural);
    if (!structural->isa<ValueTuple>()) {
      MS_LOG(EXCEPTION) << "The input structural must be a ValueTuple, but got " << structural->ToString();
    }
    auto value_tuple = structural->cast_ptr<ValueTuple>();
    MS_EXCEPTION_IF_NULL(value_tuple);

    std::vector<ValuePtr> values;
    for (size_t i = 0; i < value_tuple->size(); ++i) {
      auto item = (*value_tuple)[i];
      (void)values.emplace_back(ConstructEmptyTupleValue(item));
    }

    return std::make_shared<ValueTuple>(values);
  }

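  // Rebuild one input value from a structural attr entry and the flattened input abstracts.
  // Returns the reconstructed value together with the number of abstracts consumed.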
  static std::pair<ValuePtr, size_t> ConstructInputValue(const ValuePtr &value,
                                                         const std::vector<abstract::AbstractBase *> &input_abstract,
                                                         size_t input_index) {
    MS_EXCEPTION_IF_NULL(value);
    auto begin_iter = input_abstract.begin() + input_index;
    if (value->isa<ValueSequence>()) {
      size_t offset = 0;
      std::vector<ValuePtr> values;
      auto seq_value = value->cast_ptr<ValueSequence>();
      MS_EXCEPTION_IF_NULL(seq_value);
      for (size_t i = 0; i < seq_value->size(); ++i) {
        auto [value_inner, offset_inner] = ConstructInputValue((*seq_value)[i], input_abstract, input_index + offset);
        MS_EXCEPTION_IF_NULL(value_inner);
        (void)values.emplace_back(value_inner);
        offset += offset_inner;
      }
      (void)std::for_each(begin_iter, begin_iter + offset,
                          [](const auto &abs) -> void { MS_LOG(DEBUG) << "The converted abs is: " << abs->ToString(); });
      return std::make_pair(std::make_shared<ValueTuple>(values), offset);
    }

    const auto num_value = GetValue<int64_t>(value);

    constexpr auto kNotDynamicFlag = -1;
    if (num_value == kNotDynamicFlag) {
      return std::make_pair(GetValueByAbstract(*begin_iter), 1);
    } else {
      MS_LOG(EXCEPTION) << "The values in the structural attr must all be -1, but got " << num_value;
    }
  }

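  // Rebuild the full PyExecute input tuple from the primitive's kAttrTupleInputStructural attr
  // and the flattened input abstracts.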
  static ValuePtr ConstructInputValues(const PrimitivePtr &prim,
                                       const std::vector<abstract::AbstractBase *> &input_abstract) {
    MS_EXCEPTION_IF_NULL(prim);
    auto input_structural = prim->GetAttr(kAttrTupleInputStructural);
    if (input_structural == nullptr) {
      MS_LOG(EXCEPTION) << "Invalid primitive:" << prim->ToString();
    }
    auto tuple_structural_value = input_structural->cast_ptr<ValueSequence>();
    MS_EXCEPTION_IF_NULL(tuple_structural_value);

    std::vector<ValuePtr> values;
    size_t input_index = 0;

    for (size_t i = 0; i < tuple_structural_value->size(); ++i) {
      auto item = (*tuple_structural_value)[i];
      MS_EXCEPTION_IF_NULL(item);
      if (input_abstract.size() <= input_index) {
        // The original node: Oper(a, b, ()) ==> Oper(a, b) with structural --> (-1, -1, ())
        // The abstract size will be smaller than the tuple input structural attr.
        (void)values.emplace_back(ConstructEmptyTupleValue(item));
        // All abstracts have been consumed, so skip the normal reconstruction path.
        continue;
      }
      auto [value, offset] = ConstructInputValue(item, input_abstract, input_index);
      input_index += offset;
      (void)values.emplace_back(value);
      MS_LOG(DEBUG) << "Rectify abs:" << item->ToString() << ", from structural " << value->ToString();
    }

    return std::make_shared<ValueTuple>(values);
  }

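  // Run the PyExecute script eagerly with the reconstructed keys/values as local variables,
  // cache the python output on the primitive, and derive the output abstract from it.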
  static abstract::AbstractBasePtr PyExecuteInferPy(const PrimitivePtr &primitive, const ValuePtr &input_value) {
    MS_EXCEPTION_IF_NULL(input_value);
    if (!input_value->isa<ValueSequence>()) {
      MS_LOG(EXCEPTION) << "Invalid pyexecute input value:" << input_value->ToString();
    }
    const auto &tuple_values = input_value->cast<ValueSequencePtr>();
    MS_EXCEPTION_IF_NULL(tuple_values);
    const auto &inputs = tuple_values->value();
    constexpr auto number_two = 2;
    if (inputs.size() <= number_two) {
      MS_LOG(EXCEPTION) << "Invalid pyexecute input value:" << input_value->ToString();
    }

    if (!inputs[0]->isa<StringImm>()) {
      MS_LOG(EXCEPTION) << "Invalid script value:" << inputs[0]->ToString();
    }
    const auto &script = dyn_cast<StringImm>(inputs[0]);

    if (!inputs[1]->isa<ValueSequence>()) {
      MS_LOG(WARNING) << "The keys input is not a tuple value, but got " << inputs[1]->ToString();
      return abstract::MakeAbstract(std::make_shared<abstract::Shape>(ShapeVector({1})), kFloat64);
    }
    const auto &keys = dyn_cast<ValueSequence>(inputs[1]);
    MS_EXCEPTION_IF_NULL(keys);

    if (!inputs[number_two]->isa<ValueSequence>()) {
      MS_LOG(DEBUG) << "The values input is not a tuple value, but got " << inputs[number_two]->ToString();
      return abstract::MakeAbstract(std::make_shared<abstract::Shape>(ShapeVector({1})), kFloat64);
    }
    const auto &values = dyn_cast<ValueSequence>(inputs[number_two]);
    MS_EXCEPTION_IF_NULL(values);

    MS_LOG(DEBUG) << "The script is: " << script->ToString() << ", keys: " << keys->ToString()
                  << ", values: " << values->ToString();
    if (keys->size() != values->size()) {
      MS_LOG(EXCEPTION) << "The length of keys(" << keys->size() << ") is not equal to the length of values("
                        << values->size() << ").";
    }

    py::gil_scoped_acquire gil_acquire;
    py::dict local_dict;
    for (size_t i = 0; i < keys->size(); ++i) {
      const auto &key = (*keys)[i];
      const auto &key_str = dyn_cast<StringImm>(key);
      MS_EXCEPTION_IF_NULL(key_str);

      const auto &value = (*values)[i];
      MS_EXCEPTION_IF_NULL(value);
      auto obj = ValueToPyData(value);
      local_dict[py::str(key_str->value())] = obj;
    }

    const auto &py_script = py::str(script->value());
    auto params = py::tuple(number_two);
    params[0] = py::dict();
    params[1] = local_dict;
    MS_LOG(DEBUG) << "Python script: " << py_script << ", local_dict: " << local_dict;
    try {
      mindspore::ScopedFallbackRunning fallback_running;
      const auto &output = parse::data_converter::CallPythonScript(py_script, params);
      if (ContainStubTensor(output)) {
        MS_EXCEPTION(TypeError) << "PyExecute node output cannot contain a stub tensor.";
      }
      MS_LOG(DEBUG) << "Python output type: " << py::str(output.get_type()) << ", output: " << output;
      primitive->set_attr(kAttrPyExecuteOutput, std::make_shared<parse::PyObjectWrapper>(output, "graph python obj"));
      if (py::isinstance<tensor::Tensor>(output) || IsStubTensor(output)) {
        const auto &tensor = IsStubTensor(output) ? ConvertStubTensor(output) : output.cast<tensor::TensorPtr>();
        return tensor->ToAbstract();
      } else if (py::isinstance<py::bool_>(output)) {
        return std::make_shared<tensor::Tensor>(py::cast<bool>(output))->ToAbstract();
      } else if (py::isinstance<py::int_>(output)) {
        return std::make_shared<tensor::Tensor>(py::cast<int64_t>(output))->ToAbstract();
      } else if (py::isinstance<py::float_>(output)) {
        return std::make_shared<tensor::Tensor>(py::cast<float>(output))->ToAbstract();
      }
    } catch (const py::error_already_set &e) {
      auto error_type_name = py::cast<std::string>(python_adapter::GetPyObjAttr(e.type(), "__name__"));
      auto error_iter = exception_types_map.find(error_type_name);
      if (error_iter != exception_types_map.end()) {
        auto &handler = LogWriter::GetExceptionHandler();
        if (handler != nullptr) {
          handler(error_iter->second, py::str(e.value()));
        }
      }
      throw std::runtime_error(py::str(e.value()));
    }

    const auto &infer_shape = std::make_shared<abstract::Shape>(ShapeVector({1}));
    return abstract::MakeAbstract(infer_shape, kFloat64);
  }

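  // Entry point registered for both infer and launch: rebuild the input values from the
  // abstracts and run PyExecuteInferPy, rethrowing any exception through HandleExceptionRethrow.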
  static abstract::AbstractBasePtr CppInferShapeAndTypePy(const PrimitivePtr &primitive,
                                                          const std::vector<abstract::AbstractBase *> &args_abs_list) {
    // We can't catch the pybind11 exception by py::builtin_exception or its base class,
    // so we have to list all pybind11 exceptions and catch them one by one here.
    AbstractBasePtr res;
    std::function<void(void)> already_set_error_handler;
    std::function<void(void)> other_error_handler;
    std::function<void(void)> default_error_handler;
    HandleExceptionRethrow(
      [&res, &primitive, &args_abs_list]() {
        res = PyExecuteInferPy(primitive, ConstructInputValues(primitive, args_abs_list));
        MS_LOG(DEBUG) << "The abstract:" << res;
        return res;
      },
      already_set_error_handler, other_error_handler, default_error_handler);
    return res;
  }
};

static PyExecuteInitializer py_execute_initializer;
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_PYBINDAPI_IR_PY_EXECUTE_PY_H_