/**
 * Copyright 2019-2023 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "include/common/utils/convert_utils_py.h"

#include <vector>
#include <string>
#include <memory>
#include <algorithm>
#include <list>
#include <utility>
#include <cfloat>

#include "mindspore/core/ops/framework_ops.h"
#include "Eigen/Core"
#include "abstract/abstract_value.h"
#include "abstract/utils.h"
#include "pipeline/jit/ps/parse/parse_base.h"
#include "pipeline/jit/ps/parse/resolve.h"
#include "ir/value.h"
#include "ir/anf.h"
#include "ir/tensor.h"
#include "ir/param_info.h"
#include "pybind_api/ir/base_ref_py.h"
#include "ir/dtype/tensor_type.h"
#include "utils/ms_context.h"
#include "include/common/fallback.h"
#include "include/common/utils/stub_tensor.h"
#include "include/common/utils/convert_utils.h"

namespace mindspore {
py::object BuiltinsToPyData(const Any &value);
py::object BuiltinsToPyData(const BaseRef &value);
py::object VectorToPyData(const Any &value);
py::object VectorRefToPyData(const VectorRef &value_list, const AbstractBasePtr &abs = nullptr);
py::object MakeCSRTensor(const VectorRef &value_list);
py::object MakeCSRTensor(const ValuePtr &value);
py::object MakeCOOTensor(const VectorRef &value_list);
py::object MakeCOOTensor(const ValuePtr &value);
ShapeVector ConvertShapeTupleToShapeVector(const ValueTuplePtr &shape_tuple);
ShapeVector ConvertToShapeVector(const VectorRef &value_list, size_t shape_idx);

// For AbstractSequence and AbstractDictionary.
template <typename T>
T CheckAbstractElementsSize(const AbstractBasePtr &abs_value, size_t value_size) {
  if (abs_value == nullptr) {
    return nullptr;
  }
  auto abs = abs_value->cast<T>();
  if (abs != nullptr && value_size != abs->size()) {
    MS_LOG(EXCEPTION) << "The size of elements should be equal to " << value_size << ", but got " << abs->size();
  }
  return abs;
}

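// Propagate the adapter flag from the tensor abstract to the Python tensor object as its
// `adapter_flag` attribute.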
py::object SetAdaptedAttrToTensor(const py::object &tensor, const AbstractBasePtr &abs) {
  if (abs == nullptr || !abs->isa<abstract::AbstractTensor>()) {
    return tensor;
  }
  auto tensor_abs = abs->cast<abstract::AbstractTensorPtr>();
  if (tensor_abs->is_adapter()) {
    py::setattr(tensor, "adapter_flag", py::bool_(true));
  } else {
    py::setattr(tensor, "adapter_flag", py::bool_(false));
  }
  return tensor;
}

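// If the abstract indicates a scalar output, read the tensor's data and build the matching Python
// scalar; otherwise return py::none() so the caller keeps the tensor as is.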
py::object CheckAndConvertToScalar(const tensor::BaseTensorPtr &tensor, const AbstractBasePtr &abs) {
  if (abs == nullptr || !abs->isa<abstract::AbstractScalar>()) {
    return py::none();
  }
  tensor->data_sync();
  auto *data = tensor->data_c();
  auto type = abs->BuildType()->type_id();
  switch (type) {
    case kNumberTypeBool:
      return py::bool_(*reinterpret_cast<const bool *>(data));
    case kNumberTypeInt16:
      return py::int_(*reinterpret_cast<const int16_t *>(data));
    case kNumberTypeUInt16:
      return py::int_(*reinterpret_cast<const uint16_t *>(data));
    case kNumberTypeInt8:
      return py::int_(*reinterpret_cast<const int8_t *>(data));
    case kNumberTypeUInt8:
      return py::int_(*reinterpret_cast<const uint8_t *>(data));
    case kNumberTypeInt32:
      return py::int_(*reinterpret_cast<const int32_t *>(data));
    case kNumberTypeUInt32:
      return py::int_(*reinterpret_cast<const uint32_t *>(data));
    case kNumberTypeInt64:
      return py::int_(*reinterpret_cast<const int64_t *>(data));
    case kNumberTypeUInt64:
      return py::int_(*reinterpret_cast<const uint64_t *>(data));
    case kNumberTypeFloat16: {
      const Eigen::half_impl::__half_raw data_half(*reinterpret_cast<const uint16_t *>(data));
      return py::float_(Eigen::half_impl::half_to_float(data_half));
    }
    case kNumberTypeFloat32:
      return py::float_(*reinterpret_cast<const float *>(data));
    case kNumberTypeFloat64:
      return py::float_(*reinterpret_cast<const double *>(data));
    case kNumberTypeBFloat16: {
      // Interpret the raw 16 bits as bfloat16 rather than float16.
      const Eigen::bfloat16_impl::__bfloat16_raw data_bf16(*reinterpret_cast<const uint16_t *>(data));
      return py::float_(Eigen::bfloat16_impl::bfloat16_to_float(data_bf16));
    }
    default:
      return py::none();
  }
}

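// Wrap the sparse tensor in a one-element py::tuple so that pybind11 converts it to the
// corresponding Python object, then return that element.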
py::object CSRTensorToPyData(const tensor::CSRTensorPtr &csr_tensor) {
  auto ref = py::tuple(1);
  ref[0] = csr_tensor;
  return ref[0];
}

py::object COOTensorToPyData(const tensor::COOTensorPtr &coo_tensor) {
  auto ref = py::tuple(1);
  ref[0] = coo_tensor;
  return ref[0];
}

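// Convert a tensor to Python data: return a Python scalar when the abstract marks the output as a
// scalar, otherwise return the tensor object with the adapter flag attached.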
py::object TensorToPyData(const tensor::BaseTensorPtr &tensor, const AbstractBasePtr &abs) {
  MS_EXCEPTION_IF_NULL(tensor);
  auto scalar_obj = CheckAndConvertToScalar(tensor, abs);
  if (!py::isinstance<py::none>(scalar_obj)) {
    return scalar_obj;
  }

  py::tuple v(1);
  v[0] = tensor;
  v[0] = SetAdaptedAttrToTensor(v[0], abs);
  return v[0];
}

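// Convert a C++ Scalar value (bool, integer or floating-point immediate) to the matching Python
// int, float or bool object.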
py::object ScalarPtrToPyData(const ScalarPtr &value) {
  constexpr double eps = 1e-6;
  py::int_ int_v;
  py::float_ float_v;
  py::bool_ bool_v;
  TypeId scalar_type = value->type()->type_id();
  float float_value;
  double double_value;
  switch (scalar_type) {
    case kNumberTypeUInt8:
      MS_LOG(DEBUG) << "uint8";
      int_v = value->cast<UInt8ImmPtr>()->value();
      return int_v;
    case kNumberTypeUInt16:
      MS_LOG(DEBUG) << "uint16";
      int_v = value->cast<UInt16ImmPtr>()->value();
      return int_v;
    case kNumberTypeUInt32:
      MS_LOG(DEBUG) << "uint32";
      int_v = value->cast<UInt32ImmPtr>()->value();
      return int_v;
    case kNumberTypeUInt64:
      MS_LOG(DEBUG) << "uint64";
      int_v = value->cast<UInt64ImmPtr>()->value();
      return int_v;
    case kNumberTypeInt8:
      MS_LOG(DEBUG) << "int8";
      int_v = value->cast<Int8ImmPtr>()->value();
      return int_v;
    case kNumberTypeInt16:
      MS_LOG(DEBUG) << "int16";
      int_v = value->cast<Int16ImmPtr>()->value();
      return int_v;
    case kNumberTypeInt32:
      MS_LOG(DEBUG) << "int32";
      int_v = value->cast<Int32ImmPtr>()->value();
      return int_v;
    case kNumberTypeInt64:
      MS_LOG(DEBUG) << "int64";
      int_v = value->cast<Int64ImmPtr>()->value();
      return int_v;
    case kNumberTypeFloat32:
      MS_LOG(DEBUG) << "float";
      float_value = value->cast<FP32ImmPtr>()->value();
      double_value = value->cast<FP32ImmPtr>()->prim_value();
      // If the double value is still the default 0, don't use it.
      if (std::abs(double_value) > std::numeric_limits<double>::epsilon() &&
          std::abs(float_value - double_value) < eps) {
        float_v = double_value;
      } else {
        float_v = float_value;
      }
      return float_v;
    case kNumberTypeFloat64:
      MS_LOG(DEBUG) << "double";
      float_v = value->cast<FP64ImmPtr>()->value();
      return float_v;
    case kNumberTypeBool:
      MS_LOG(DEBUG) << "bool";
      bool_v = value->cast<BoolImmPtr>()->value();
      return bool_v;
    default:
      MS_EXCEPTION(TypeError) << "Unsupported scalar converted to py data: " << value->ToString();
  }
}

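// Convert a ValueSequence (tuple, list or namedtuple) to the corresponding Python sequence. The
// abstract, when provided, supplies per-element abstracts and the dynamic-length flag.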
py::object ValueSequenceToPyData(const ValueSequencePtr &value, const AbstractBasePtr &abs) {
  auto value_sequence = value->value();
  auto value_size = value_sequence.size();
  if (value_size == 0) {
    // If the value sequence is empty, return an empty Python sequence.
    py::tuple res_sequence(value_size);
    if (value->isa<ValueTuple>()) {
      return res_sequence;
    }
    return res_sequence.cast<py::list>();
  }
  // Convert a non-empty ValueNamedTuple, whose Python object is a namedtuple.
  if (value->isa<ValueNamedTuple>()) {
    auto value_named_tuple = value->cast<ValueNamedTuplePtr>();
    MS_LOG(DEBUG) << "Convert ValueNamedTuple: " << value_named_tuple->ToString();
    auto keys = value_named_tuple->key();
    auto keys_size = keys.size();
    py::tuple key_sequence(keys_size);
    py::tuple ele_sequence(keys_size);
    for (size_t i = 0; i < keys_size; i++) {
      key_sequence[i] = ValueToPyData(keys[i]);
      ele_sequence[i] = ValueToPyData(value_sequence[i]);
    }
    py::module mod = python_adapter::GetPyModule(parse::PYTHON_MOD_PARSE_MODULE);
    py::str sub_class_name = py::str(value_named_tuple->sub_class_name());
    py::object named_tuple = python_adapter::CallPyModFn(mod, parse::PYTHON_MOD_CONVERT_TO_NAMEDTUPLE, sub_class_name,
                                                         key_sequence, ele_sequence);
    return named_tuple;
  }
  py::tuple res_sequence(value_size);
  if (abs != nullptr && abs->isa<abstract::AbstractSequence>() &&
      abs->cast<abstract::AbstractSequencePtr>()->dynamic_len()) {
    // A dynamic length sequence directly uses the values to create the Python objects.
    for (size_t i = 0; i < value_size; i++) {
      res_sequence[i] = ValueToPyData(value_sequence[i]);
    }
  } else {
    auto abs_sequence = CheckAbstractElementsSize<abstract::AbstractSequencePtr>(abs, value_size);
    if (abs_sequence == nullptr) {
      for (size_t i = 0; i < value_size; i++) {
        res_sequence[i] = ValueToPyData(value_sequence[i]);
      }
    } else {
      for (size_t i = 0; i < value_size; i++) {
        res_sequence[i] = ValueToPyData(value_sequence[i], abs_sequence->elements()[i]);
      }
    }
  }
  if (value->isa<ValueTuple>()) {
    return res_sequence;
  }
  return res_sequence.cast<py::list>();
}

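// Convert a ValueDictionary to a Python dict. When the abstract is given, it must have the same
// number of entries, and its per-entry abstracts guide the key/value conversion.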
py::object ValueDictionaryToPyData(const ValueDictionaryPtr &value, const AbstractBasePtr &abs) {
  auto value_dict = value->value();
  auto value_size = value_dict.size();
  py::dict res_dict;
  auto abs_dict = CheckAbstractElementsSize<abstract::AbstractDictionaryPtr>(abs, value_size);
  if (abs_dict == nullptr) {
    for (const auto &v : value_dict) {
      res_dict[ValueToPyData(v.first)] = ValueToPyData(v.second);
    }
  } else {
    for (size_t i = 0; i < value_size; i++) {
      auto v = value_dict[i];
      auto abs_elem = abs_dict->elements()[i];
      res_dict[ValueToPyData(v.first, abs_elem.first)] = ValueToPyData(v.second, abs_elem.second);
    }
  }
  return res_dict;
}

using ConverterFunction = std::function<py::object(const ValuePtr &value, const AbstractBasePtr &abs)>;
using ValueNameToConverterVector = std::vector<std::pair<uint32_t, ConverterFunction>>;

// (Value Type Name) -> (Converter Function)
// The converter function is used to convert a Value object to a Python data object.
static ValueNameToConverterVector value_name_to_converter = {
  // Scalar
  {Scalar::kTypeId,
   [](const ValuePtr &value, const AbstractBasePtr &) -> py::object {
     return ScalarPtrToPyData(value->cast<ScalarPtr>());
   }},
  // Tensor
  {tensor::Tensor::kTypeId,
   [](const ValuePtr &value, const AbstractBasePtr &abs) -> py::object {
     auto tensor_ptr = value->cast<tensor::TensorPtr>();
     return TensorToPyData(tensor_ptr, abs);
   }},
  // BaseTensor
  {tensor::BaseTensor::kTypeId,
   [](const ValuePtr &value, const AbstractBasePtr &abs) -> py::object {
     auto tensor_ptr = value->cast<tensor::BaseTensorPtr>();
     return TensorToPyData(std::make_shared<tensor::Tensor>(*tensor_ptr), abs);
   }},
  // MetaTensor
  {tensor::MetaTensor::kTypeId,
   [](const ValuePtr &value, const AbstractBasePtr &) -> py::object {
     py::tuple tuple_container(1);
     tuple_container[0] = value->cast<tensor::MetaTensorPtr>();
     return tuple_container[0];
   }},
  // CSRTensor
  {tensor::CSRTensor::kTypeId,
   [](const ValuePtr &value, const AbstractBasePtr &) -> py::object {
     auto csr_tensor_ptr = value->cast<tensor::CSRTensorPtr>();
     return CSRTensorToPyData(csr_tensor_ptr);
   }},
  // COOTensor
  {tensor::COOTensor::kTypeId,
   [](const ValuePtr &value, const AbstractBasePtr &) -> py::object {
     auto coo_tensor_ptr = value->cast<tensor::COOTensorPtr>();
     return COOTensorToPyData(coo_tensor_ptr);
   }},
  // RefKey
  {RefKey::kTypeId,
   [](const ValuePtr &value, const AbstractBasePtr &) -> py::object {
     py::tuple tuple_container(1);
     tuple_container[0] = value->cast<RefKeyPtr>();
     return tuple_container[0];
   }},
  // Type
  {Type::kTypeId,
   [](const ValuePtr &value, const AbstractBasePtr &) -> py::object {
     py::tuple tuple_container(1);
     tuple_container[0] = value->cast<TypePtr>();
     return tuple_container[0];
   }},
  // StringImm
  {StringImm::kTypeId,
   [](const ValuePtr &value, const AbstractBasePtr &) -> py::object {
     py::str res = value->cast<StringImmPtr>()->value();
     return res;
   }},
  // ValueSequence
  {ValueSequence::kTypeId,
   [](const ValuePtr &value, const AbstractBasePtr &abs) -> py::object {
     auto value_sequence = value->cast<ValueSequencePtr>();
     return ValueSequenceToPyData(value_sequence, abs);
   }},
  // ValueDictionary
  {ValueDictionary::kTypeId,
   [](const ValuePtr &value, const AbstractBasePtr &abs) -> py::object {
     auto value_dict = value->cast<ValueDictionaryPtr>();
     return ValueDictionaryToPyData(value_dict, abs);
   }},
  // ValueSlice
  {ValueSlice::kTypeId,
   [](const ValuePtr &value, const AbstractBasePtr &) -> py::object {
     auto slice = value->cast<ValueSlicePtr>();
     auto start = ValueToPyData(slice->start());
     auto end = ValueToPyData(slice->stop());
     auto step = ValueToPyData(slice->step());
     return python_adapter::CallPyFn(parse::PYTHON_MOD_PARSE_MODULE, parse::PYTHON_PARSE_CLASS_SLICE, start, end, step);
   }},
  // KeywordArg
  {KeywordArg::kTypeId,
   [](const ValuePtr &value, const AbstractBasePtr &) -> py::object {
     auto abs_keyword_arg = value->ToAbstract()->cast<abstract::AbstractKeywordArgPtr>();
     auto key = abs_keyword_arg->get_key();
     auto val = abs_keyword_arg->get_arg()->BuildValue();
     auto py_value = ValueToPyData(val);
     auto kwargs = py::kwargs();
     kwargs[key.c_str()] = py_value;
     return kwargs;
   }},
  // parse::NameSpace
  {parse::NameSpace::kTypeId,
   [](const ValuePtr &value, const AbstractBasePtr &) -> py::object {
     auto ns = value->cast<parse::NameSpacePtr>();
     return ns->module_obj();
   }},
  // parse::ClassType
  {parse::ClassType::kTypeId,
   [](const ValuePtr &value, const AbstractBasePtr &) -> py::object {
     auto class_type = value->cast<parse::ClassTypePtr>();
     return class_type->obj();
   }},
  // parse::MsClassObject
  {parse::MsClassObject::kTypeId,
   [](const ValuePtr &value, const AbstractBasePtr &) -> py::object {
     auto ms_class_object = value->cast<parse::MsClassObjectPtr>();
     return ms_class_object->obj();
   }},
  // parse::InterpretedObject
  {parse::InterpretedObject::kTypeId,
   [](const ValuePtr &value, const AbstractBasePtr &) -> py::object {
     auto interpreted_object = value->cast<parse::InterpretedObjectPtr>();
     return interpreted_object->obj();
   }},
  // parse::PyObjectWrapper
  {parse::PyObjectWrapper::kTypeId,
   [](const ValuePtr &value, const AbstractBasePtr &) -> py::object {
     auto py_object = value->cast<parse::PyObjectWrapperPtr>();
     return py_object->obj();
   }},
  // None
  {None::kTypeId, [](const ValuePtr &, const AbstractBasePtr &) -> py::object { return py::none(); }},
  // ValueAny
  {ValueAny::kTypeId, [](const ValuePtr &, const AbstractBasePtr &) -> py::object { return py::none(); }},
  // ValueProblem
  {ValueProblem::kTypeId, [](const ValuePtr &, const AbstractBasePtr &) -> py::object { return py::none(); }},
  // FuncGraph
  {FuncGraph::kTypeId, [](const ValuePtr &, const AbstractBasePtr &) -> py::object { return py::none(); }},
  // Primitive
  {Primitive::kTypeId, [](const ValuePtr &, const AbstractBasePtr &) -> py::object { return py::none(); }},
  // Monad
  {Monad::kTypeId, [](const ValuePtr &, const AbstractBasePtr &) -> py::object { return py::none(); }},
  // Ellipsis
  {Ellipsis::kTypeId, [](const ValuePtr &, const AbstractBasePtr &) -> py::object { return py::ellipsis(); }}};

// When converting data to tensor, ValueToPyData will only return a _c_expression Tensor, not a
// Python Tensor. If a Python Tensor is needed, call _convert_python_data on the output.
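// Illustrative usage (assuming MakeValue from ir/value.h, which is included above):
//   py::object obj = ValueToPyData(MakeValue(static_cast<int64_t>(3)));  // yields the Python int 3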
py::object ValueToPyData(const ValuePtr &value, const AbstractBasePtr &abs) {
  if (value == nullptr) {
    MS_LOG(EXCEPTION) << "The `value` should not be null.";
  }
  py::gil_scoped_acquire gil;
  for (auto &iter : value_name_to_converter) {
    if (value->IsFromTypeId(iter.first)) {
      return iter.second(value, abs);
    }
  }
  MS_LOG(EXCEPTION) << "Converting " << value->ToString() << "[" << value->type_name()
                    << "] to Python data is not supported.";
}

py::object AnyToPyData(const Any &value) {
  py::object ret;
  MS_LOG(DEBUG) << "AnyToPyData " << value.GetString();
  if (value.is<int>() || value.is<float>() || value.is<double>() || value.is<bool>()) {
    ret = BuiltinsToPyData(value);
  } else if (value.is<ValuePtr>()) {
    MS_LOG(DEBUG) << "ValuePtr";
    ValuePtr v = value.cast<ValuePtr>();
    ret = ValueToPyData(v);
  } else if (value.is<py::object>()) {
    MS_LOG(DEBUG) << "py obj";
    ret = value.cast<py::object>();
  } else if (value.is<std::vector<tensor::TensorPtr>>() || value.is<std::vector<Any>>()) {
    ret = VectorToPyData(value);
  } else if (value.is<std::list<Any>>()) {
    MS_LOG(DEBUG) << "list_any";
    auto value_list = value.cast<std::list<Any>>();
    py::list rets = py::list();
    for (auto &v : value_list) {
      rets.append(AnyToPyData(v));
    }
    ret = rets;
  } else if (value.is<std::vector<Any>>()) {
    // Note: unreachable in practice, since std::vector<Any> is already routed to VectorToPyData above.
    auto value_list = value.cast<std::vector<Any>>();
    py::tuple rets(value_list.size());
    for (size_t i = 0; i < value_list.size(); i++) {
      rets[i] = AnyToPyData(value_list[i]);
    }
    ret = rets;
  } else if (value.is<TypePtr>()) {
    py::tuple v(1);
    v[0] = value.cast<TypePtr>();
    ret = v[0];
  } else {
    MS_LOG(EXCEPTION) << "The value is not a supported type.";
  }
  return ret;
}

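// Convert a BaseRef produced by graph execution to Python data, dispatching on builtin scalars,
// ValuePtr, wrapped Python objects, VectorRef and TypePtr.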
py::object BaseRefToPyData(const BaseRef &value, const AbstractBasePtr &abs) {
  py::object ret;
  MS_LOG(DEBUG) << "BaseRefToPyData " << value.ToString();
  if (utils::isa<int>(value) || utils::isa<float>(value) || utils::isa<double>(value) || utils::isa<bool>(value)) {
    ret = BuiltinsToPyData(value);
  } else if (utils::isa<ValuePtr>(value)) {
    MS_LOG(DEBUG) << "ValuePtr";
    ValuePtr v = utils::cast<ValuePtr>(value);
    ret = ValueToPyData(v, abs);
  } else if (utils::isa<PyObjectRef>(value)) {
    MS_LOG(DEBUG) << "py obj";
    PyObjectRef py_ref = utils::cast<PyObjectRef>(value);
    ret = py_ref.object_;
  } else if (utils::isa<VectorRef>(value)) {
    auto vec_ref = utils::cast<VectorRef>(value);
    ret = VectorRefToPyData(vec_ref, abs);
  } else if (utils::isa<TypePtr>(value)) {
    py::tuple v(1);
    v[0] = utils::cast<TypePtr>(value);
    ret = v[0];
  } else {
    MS_LOG(EXCEPTION) << "The value is not supported, value: " << value.ToString();
  }
  return ret;
}

py::object BuiltinsToPyData(const Any &value) {
  if (value.is<int>()) {
    MS_LOG(DEBUG) << "int";
    py::int_ ret = value.cast<int>();
    return ret;
  } else if (value.is<float>()) {
    MS_LOG(DEBUG) << "float";
    py::float_ ret = value.cast<float>();
    return ret;
  } else if (value.is<double>()) {
    MS_LOG(DEBUG) << "double";
    py::float_ ret = value.cast<double>();
    return ret;
  } else {
    MS_LOG(DEBUG) << "bool";
    py::bool_ ret = value.cast<bool>();
    return ret;
  }
}

py::object BuiltinsToPyData(const BaseRef &value) {
  if (utils::isa<int>(value)) {
    MS_LOG(DEBUG) << "int";
    py::int_ ret = utils::cast<int>(value);
    return ret;
  } else if (utils::isa<float>(value)) {
    MS_LOG(DEBUG) << "float";
    py::float_ ret = utils::cast<float>(value);
    return ret;
  } else if (utils::isa<double>(value)) {
    MS_LOG(DEBUG) << "double";
    py::float_ ret = utils::cast<double>(value);
    return ret;
  } else {
    MS_LOG(DEBUG) << "bool";
    py::bool_ ret = utils::cast<bool>(value);
    return ret;
  }
}

py::object VectorToPyData(const Any &value) {
  py::object ret;
  if (value.is<std::vector<tensor::TensorPtr>>()) {
    MS_LOG(DEBUG) << "vector_tensor";
    std::vector<tensor::TensorPtr> outputs;
    outputs = value.cast<std::vector<tensor::TensorPtr>>();
    py::tuple tensor_tuple(outputs.size());
    for (std::size_t i = 0; i < outputs.size(); ++i) {
      tensor_tuple[i] = *outputs[i];
    }
    ret = tensor_tuple;
  } else {
    MS_LOG(DEBUG) << "vector_any";
    auto value_list = value.cast<std::vector<Any>>();
    py::tuple any_tuple = py::tuple(value_list.size());
    size_t i = 0;
    for (auto &v : value_list) {
      any_tuple[i] = AnyToPyData(v);
      i++;
    }
    ret = any_tuple;
  }
  return ret;
}
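
// Convert a VectorRef to a Python tuple or list (selected via the template parameter), guided by
// the sequence abstract: handle dynamic-length sequences, and skip None elements that the backend
// has eliminated when the fallback runtime is disabled.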
template <typename T>
py::object AbstractSequenceToPyData(const VectorRef &value_list, const AbstractBasePtr &abs) {
  auto value_size = value_list.size();
  auto ret = T(value_size);
  auto seq_abs = abs->cast<abstract::AbstractSequencePtr>();
  MS_EXCEPTION_IF_NULL(seq_abs);
  bool dynamic_len = seq_abs->dynamic_len();
  auto dynamic_len_element_abs = seq_abs->dynamic_len_element_abs();
  if (dynamic_len || dynamic_len_element_abs != nullptr) {
    if (dynamic_len_element_abs == nullptr) {
      MS_LOG(INFO) << "Dynamic length sequence has no specified element abstract, convert elements without abstract.";
      for (size_t i = 0; i < value_size; i++) {
        ret[i] = BaseRefToPyData(value_list[i]);
      }
      return ret;
    }
    if (dynamic_len_element_abs->isa<abstract::AbstractNone>()) {
      MS_LOG(INFO) << "Dynamic length sequence with element abstract None converts to an empty sequence.";
      return ret;
    }
    for (size_t i = 0; i < value_size; ++i) {
      ret[i] = BaseRefToPyData(value_list[i], dynamic_len_element_abs);
    }
    return ret;
  }
  const auto allow_fallback_runtime = (fallback::GetJitSyntaxLevel() >= kCompatible);
  // If the fallback runtime is not enabled, the size of seq_abs may be larger than the size of
  // value_list, because the backend eliminates None outputs.
  size_t ref_idx = 0;
  for (size_t i = 0; i < seq_abs->size(); i++) {
    auto elem_abs = seq_abs->elements()[i];
    if (elem_abs->isa<abstract::AbstractNone>() && !allow_fallback_runtime) {
      continue;
    }
    ret[ref_idx] = BaseRefToPyData(value_list[ref_idx], elem_abs);
    ref_idx++;
  }
  if (ref_idx != value_size) {
    MS_LOG(EXCEPTION) << "The size of elements (excluding None) should be equal to " << value_size << ", but got "
                      << ref_idx;
  }
  return ret;
}

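// Convert a VectorRef to Python data. Without an abstract, every element becomes a tuple entry;
// with an abstract, sparse tensors and list/tuple outputs are reconstructed accordingly.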
py::object VectorRefToPyData(const VectorRef &value_list, const AbstractBasePtr &abs) {
  py::object ret;
  size_t value_size = value_list.size();
  auto ref_tuple = py::tuple(value_size);
  if (abs == nullptr) {
    for (size_t i = 0; i < value_size; i++) {
      ref_tuple[i] = BaseRefToPyData(value_list[i]);
    }
    ret = ref_tuple;
    return ret;
  }

  if (value_size == 0 && !abs->isa<abstract::AbstractList>()) {
    return ref_tuple;
  }

  // The current VectorRef reflects a sparse tensor (CSRTensor or COOTensor) type.
  if (abs->isa<abstract::AbstractCSRTensor>()) {
    return MakeCSRTensor(value_list);
  }
  if (abs->isa<abstract::AbstractCOOTensor>()) {
    return MakeCOOTensor(value_list);
  }
  if (abs->isa<abstract::AbstractList>()) {
    return AbstractSequenceToPyData<py::list>(value_list, abs);
  }
  return AbstractSequenceToPyData<py::tuple>(value_list, abs);
}

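// If the graph output is a ValueNode or a Parameter, fill *ret_val directly from the constant or
// from the corresponding input/default parameter and return true, so the graph need not be run.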
bool IsGraphOutputValueNodeOrParameter(const AnfNodePtr &output, const py::tuple &args,
                                       const std::shared_ptr<py::object> &ret_val) {
  if (output->isa<ValueNode>()) {
    MS_LOG(INFO) << "Graph's output is a constant. No need to execute.";
    ValuePtr value = GetValueNode(output);
    auto abs = output->abstract();
    MS_EXCEPTION_IF_NULL(abs);
    if (abs->isa<abstract::AbstractCSRTensor>()) {
      *ret_val = MakeCSRTensor(value);
    } else if (abs->isa<abstract::AbstractCOOTensor>()) {
      *ret_val = MakeCOOTensor(value);
    } else {
      *ret_val = ValueToPyData(value, abs);
    }
    return true;
  }

  // The adapter transforms values in __init__() and construct() into parameters, which can make
  // the number of inputs (i.e. `args` here) smaller than the number of graph parameters.
  if (output->isa<Parameter>()) {
    MS_LOG(INFO) << "Graph's output is a parameter. If all params are inputs, no need to execute.";
    // Find the right parameter as ret_val.
    auto func_graph = output->func_graph();
    MS_EXCEPTION_IF_NULL(func_graph);
    auto params = func_graph->parameters();
    if ((args.size() + func_graph->fv_param_count()) != params.size()) {
      MS_LOG(EXCEPTION) << "Input size " << args.size() << " plus parameter count " << func_graph->fv_param_count()
                        << " is not equal to graph parameter size " << params.size() << ".";
    }

    auto it = std::find(params.begin(), params.end(), output);
    if (it == params.end()) {
      MS_EXCEPTION(UnknownError) << "When the graph output is a Parameter, it should be found in graph parameters.";
    }
    size_t index = it - params.cbegin();
    if (index >= args.size() + func_graph->fv_param_count()) {
      MS_EXCEPTION(UnknownError) << "Index " << index << " is equal to or larger than args size " << args.size()
                                 << " plus parameter count " << func_graph->fv_param_count() << ".";
    }
    if (index < args.size()) {
      *ret_val = args[index];
    } else {
      auto param = dyn_cast<Parameter>(params[index]);
      MS_EXCEPTION_IF_NULL(param);
      if (!param->has_default()) {
        MS_LOG(EXCEPTION) << "Cannot determine the value of Parameter " << index << " (" << param->name() << ")";
      }
      auto tensor = param->default_param();
      *ret_val = py::cast(tensor);
    }
    *ret_val = SetAdaptedAttrToTensor(*ret_val, output->abstract());
    auto abs = output->abstract();
    MS_EXCEPTION_IF_NULL(abs);
    if (abs->isa<abstract::AbstractTensor>()) {
      py::setattr(*ret_val, "__ms_parameter_output__", py::bool_(true));
    }
    return true;
  }
  return false;
}

// SparseTensor Converters
using TensorPtr = tensor::TensorPtr;
using CSRTensor = tensor::CSRTensor;
constexpr size_t kCSRTensorInputSize{4};
constexpr size_t kCOOTensorInputSize{3};

void CheckCSRValueNums(size_t size) {
  if (size < kCSRTensorInputSize) {
    MS_LOG(EXCEPTION) << "CSRTensor must have at least " << kCSRTensorInputSize << " inputs, but got " << size;
  }
}

py::object MakeCSRTensor(const ValuePtr &value) {
  py::object ret;
  if (value->isa<ValueSequence>()) {
    auto value_sequence = value->cast<ValueSequencePtr>()->value();
    CheckCSRValueNums(value_sequence.size());
    TensorPtr indptr = utils::cast<TensorPtr>(value_sequence[tensor::CSRTensor::kIndptrIdx]);
    TensorPtr indices = utils::cast<TensorPtr>(value_sequence[tensor::CSRTensor::kIndicesIdx]);
    TensorPtr values = utils::cast<TensorPtr>(value_sequence[tensor::CSRTensor::kValuesIdx]);
    ValueTuplePtr shape_ptr = utils::cast<ValueTuplePtr>(value_sequence[tensor::CSRTensor::kShapeIdx]);
    ShapeVector shape = ConvertShapeTupleToShapeVector(shape_ptr);
    auto csr_tensor_ptr = std::make_shared<CSRTensor>(indptr, indices, values, shape);
    return CSRTensorToPyData(csr_tensor_ptr);
  }
  MS_LOG(WARNING) << "The value is not a ValueSequence, but got " << value->ToString();
  return ret;
}

py::object MakeCSRTensor(const VectorRef &value_list) {
  CheckCSRValueNums(value_list.size());
  TensorPtr indptr = utils::cast<TensorPtr>(value_list[tensor::CSRTensor::kIndptrIdx]);
  TensorPtr indices = utils::cast<TensorPtr>(value_list[tensor::CSRTensor::kIndicesIdx]);
  TensorPtr values = utils::cast<TensorPtr>(value_list[tensor::CSRTensor::kValuesIdx]);
  ShapeVector shape = ConvertToShapeVector(value_list, tensor::CSRTensor::kShapeIdx);
  auto csr_tensor_ptr = std::make_shared<CSRTensor>(indptr, indices, values, shape);
  return CSRTensorToPyData(csr_tensor_ptr);
}

ShapeVector ConvertShapeTupleToShapeVector(const ValueTuplePtr &shape_tuple) {
  ShapeVector shape;
  MS_EXCEPTION_IF_NULL(shape_tuple);
  for (const auto &v : shape_tuple->value()) {
    MS_EXCEPTION_IF_NULL(v);
    ScalarPtr scalar = v->cast<ScalarPtr>();
    MS_EXCEPTION_IF_NULL(scalar);
    shape.push_back(GetValue<int64_t>(scalar));
  }
  return shape;
}

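// Read a shape from value_list starting at `index`: the shape may be stored as a trailing run of
// scalar tensors, as a nested VectorRef, or as a ValueTuple of scalars.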
ShapeVector ConvertToShapeVector(const VectorRef &value_list, size_t index) {
  ShapeVector shape;
  if (index >= value_list.size()) {
    MS_LOG(EXCEPTION) << "Index " << index << " is out of range of " << value_list.size();
  }
  BaseRef ref = value_list[index];
  MS_EXCEPTION_IF_NULL(ref);

  auto converter = [](const BaseRef &ref) {
    auto tensorptr = utils::cast<tensor::TensorPtr>(ref);
    MS_EXCEPTION_IF_NULL(tensorptr);
    if (tensorptr->DataDim() != 0) {
      MS_LOG(EXCEPTION) << "Element must be scalar!";
    }
    tensorptr->data_sync(false);
    return *(static_cast<int64_t *>(tensorptr->data_c()));
  };

  if (utils::isa<tensor::Tensor>(ref)) {
    (void)std::transform(value_list.begin() + SizeToLong(index), value_list.end(), std::back_inserter(shape),
                         converter);
  } else if (utils::isa<VectorRef>(ref)) {
    VectorRef shape_ref = utils::cast<VectorRef>(ref);
    (void)std::transform(shape_ref.begin(), shape_ref.end(), std::back_inserter(shape), converter);
  } else if (utils::isa<ValueTuple>(ref)) {
    ValueTuplePtr shape_tuple = utils::cast<ValueTuplePtr>(ref);
    shape = ConvertShapeTupleToShapeVector(shape_tuple);
  }
  if (shape.empty()) {
    MS_LOG(ERROR) << "ShapeVector is empty!";
  }
  return shape;
}

void CheckCOOValueNums(size_t size) {
  if (size < kCOOTensorInputSize) {
    MS_LOG(EXCEPTION) << "COOTensor must have at least " << kCOOTensorInputSize << " inputs, but got " << size;
  }
}

py::object MakeCOOTensor(const ValuePtr &value) {
  auto ret = py::tuple(1);
  if (value->isa<ValueSequence>()) {
    auto value_sequence = value->cast<ValueSequencePtr>()->value();
    CheckCOOValueNums(value_sequence.size());
    TensorPtr indices = utils::cast<TensorPtr>(value_sequence[tensor::COOTensor::kIndicesIdx]);
    TensorPtr values = utils::cast<TensorPtr>(value_sequence[tensor::COOTensor::kValuesIdx]);
    ValueTuplePtr shape_ptr = utils::cast<ValueTuplePtr>(value_sequence[tensor::COOTensor::kShapeIdx]);
    ShapeVector shape = ConvertShapeTupleToShapeVector(shape_ptr);
    ret[0] = std::make_shared<tensor::COOTensor>(indices, values, shape);
    return ret[0];
  }
  // Only warn when the value is not a ValueSequence; the returned object is empty in that case.
  MS_LOG(WARNING) << "The value is not a ValueSequence, but got " << value->ToString();
  return ret[0];
}

py::object MakeCOOTensor(const VectorRef &value_list) {
  CheckCOOValueNums(value_list.size());
  tensor::TensorPtr indices = utils::cast<tensor::TensorPtr>(value_list[tensor::COOTensor::kIndicesIdx]);
  tensor::TensorPtr values = utils::cast<tensor::TensorPtr>(value_list[tensor::COOTensor::kValuesIdx]);
  ShapeVector shape = ConvertToShapeVector(value_list, tensor::COOTensor::kShapeIdx);
  auto ret = py::tuple(1);
  ret[0] = std::make_shared<tensor::COOTensor>(indices, values, shape);
  return ret[0];
}

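// Stub tensor helpers: a stub tensor is a Python tensor whose value is produced asynchronously and
// exposed through a StubNode attribute; these helpers detect it and synchronize its value.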
bool IsStubTensor(const py::handle &obj) { return py::hasattr(obj, stub::PY_ATTR_STUB); }

tensor::TensorPtr ConvertStubTensor(const py::handle &obj) {
  auto py_stub = py::getattr(obj, stub::PY_ATTR_STUB);
  auto stub = py_stub.cast<stub::StubNodePtr>();
  if (stub == nullptr) {
    auto tensor = py::getattr(obj, stub::PY_ATTR_TENSOR).cast<tensor::TensorPtr>();
    MS_EXCEPTION_IF_NULL(tensor);
    return tensor;
  }
  auto func_sync = obj.attr(stub::PY_ATTR_SYNC);
  auto res = func_sync();
  auto tensor = res.cast<tensor::TensorPtr>();
  MS_EXCEPTION_IF_NULL(tensor);
  return tensor;
}

ValuePtr PyStubNodeCast(const py::handle &obj) {
  auto py_stub = py::getattr(obj, stub::PY_ATTR_STUB);
  auto stub = py_stub.cast<stub::StubNodePtr>();
  if (stub == nullptr) {
    auto tensor = py::getattr(obj, stub::PY_ATTR_TENSOR).cast<tensor::TensorPtr>();
    MS_EXCEPTION_IF_NULL(tensor);
    return tensor;
  }
  return stub;
}

std::pair<ShapeVector, TypePtr> GetStubTensorInfo(const py::handle &obj) {
  auto py_stub = py::getattr(obj, stub::PY_ATTR_STUB);
  ValuePtr stub = py_stub.cast<stub::StubNodePtr>();
  AbstractBasePtr stub_abs;
  if (stub == nullptr) {
    auto tensor_ptr = py::getattr(obj, stub::PY_ATTR_TENSOR).cast<tensor::TensorPtr>();
    MS_EXCEPTION_IF_NULL(tensor_ptr);
    stub_abs = tensor_ptr->ToAbstract();
  } else {
    stub_abs = stub->ToAbstract();
  }
  MS_EXCEPTION_IF_NULL(stub_abs);
  return {dyn_cast<abstract::Shape>(stub_abs->BuildShape())->shape(), stub_abs->BuildType()};
}

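// Make a shallow copy of tensor values: tensors are re-wrapped without copying their data,
// sequences are copied element-wise into a ValueTuple, stub nodes are waited on first, and other
// values are returned unchanged.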
ValuePtr ShallowCopyTensorValue(const ValuePtr &value) {
  MS_EXCEPTION_IF_NULL(value);
  if (value->isa<tensor::BaseTensor>()) {
    auto tensor_value = value->cast<tensor::BaseTensorPtr>();
    MS_EXCEPTION_IF_NULL(tensor_value);
    auto shallow_tensor = std::make_shared<tensor::Tensor>(*tensor_value);
    return shallow_tensor;
  } else if (value->isa<ValueSequence>()) {
    std::vector<ValuePtr> values;
    const auto &value_seq = value->cast<ValueSequencePtr>();
    MS_EXCEPTION_IF_NULL(value_seq);
    (void)std::transform(value_seq->value().begin(), value_seq->value().end(), std::back_inserter(values),
                         [](const ValuePtr &elem) { return ShallowCopyTensorValue(elem); });
    return std::make_shared<ValueTuple>(values);
  } else if (value->isa<stub::StubNode>()) {
    auto stub_node = value->cast<stub::StubNodePtr>();
    MS_EXCEPTION_IF_NULL(stub_node);
    return ShallowCopyTensorValue(stub_node->WaitValue());
  } else {
    return value;
  }
}

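// Convert a Python object (tensor, stub tensor, scalar, list/tuple, sparse tensor or None) to the
// corresponding C++ Value; Python floats and ints become 0-d float32 / int64 tensors.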
ValuePtr ConvertPyObjectToCObject(const py::object &input_object) {
  ValuePtr output = nullptr;
  if (py::isinstance<tensor::Tensor>(input_object)) {
    output = py::cast<tensor::TensorPtr>(input_object);
  } else if (IsStubTensor(input_object)) {
    output = ConvertStubTensor(input_object);
  } else if (py::isinstance<py::float_>(input_object)) {
    double input_value = py::cast<py::float_>(input_object);
    output = std::make_shared<tensor::Tensor>(input_value, kFloat32);
  } else if (py::isinstance<py::int_>(input_object)) {
    output = std::make_shared<tensor::Tensor>(py::cast<int64_t>(input_object), kInt64);
  } else if (py::isinstance<py::list>(input_object)) {
    ValuePtrList values;
    auto list_inputs = py::cast<py::list>(input_object);
    for (size_t i = 0; i < list_inputs.size(); ++i) {
      (void)values.emplace_back(ConvertPyObjectToCObject(list_inputs[i]));
    }
    output = std::make_shared<ValueList>(values);
  } else if (py::isinstance<py::tuple>(input_object)) {
    ValuePtrList values;
    auto tuple_inputs = py::cast<py::tuple>(input_object);
    for (size_t i = 0; i < tuple_inputs.size(); ++i) {
      (void)values.emplace_back(ConvertPyObjectToCObject(tuple_inputs[i]));
    }
    output = std::make_shared<ValueTuple>(values);
  } else if (py::isinstance<tensor::CSRTensor>(input_object)) {
    output = py::cast<tensor::CSRTensorPtr>(input_object);
  } else if (py::isinstance<tensor::COOTensor>(input_object)) {
    output = py::cast<tensor::COOTensorPtr>(input_object);
  } else if (py::isinstance<py::none>(input_object)) {
    output = kNone;
  } else {
    MS_EXCEPTION(TypeError) << "Unsupported data type: " << input_object.get_type() << ".";
  }
  MS_EXCEPTION_IF_NULL(output);
  return output;
}

void ConvertPyObjectToTensor(const py::object &input_object, std::vector<ValuePtr> *tensors) {
  if (!py::isinstance<py::tuple>(input_object)) {
    MS_LOG(EXCEPTION) << "The input object should be a tuple.";
  }
  auto tuple_inputs = py::cast<py::tuple>(input_object);
  for (size_t i = 0; i < tuple_inputs.size(); ++i) {
    (void)tensors->emplace_back(ConvertPyObjectToCObject(tuple_inputs[i]));
  }
}

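// Replace every _c_expression Tensor/CSRTensor/COOTensor in input_args with its Python-level
// counterpart via the parse module helpers, recursing into nested tuples; other arguments are
// copied through unchanged.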
void ConvertCTensorToPyTensor(const py::tuple &input_args, py::tuple *convert_args) {
  MS_EXCEPTION_IF_NULL(convert_args);
  if (input_args.size() != (*convert_args).size()) {
    MS_LOG(EXCEPTION) << "The size of input_args: " << input_args.size()
                      << " should be equal to the size of convert_args: " << (*convert_args).size();
  }
  for (size_t i = 0; i < input_args.size(); ++i) {
    if (py::isinstance<tensor::Tensor>(input_args[i])) {
      (*convert_args)[i] =
        python_adapter::CallPyFn(parse::PYTHON_MOD_PARSE_MODULE, parse::PYTHON_MOD_CONVERT_TO_MS_TENSOR, input_args[i]);
    } else if (py::isinstance<tensor::CSRTensor>(input_args[i])) {
      (*convert_args)[i] = python_adapter::CallPyFn(parse::PYTHON_MOD_PARSE_MODULE,
                                                    parse::PYTHON_MOD_CONVERT_TO_MS_CSRTENSOR, input_args[i]);
    } else if (py::isinstance<tensor::COOTensor>(input_args[i])) {
      (*convert_args)[i] = python_adapter::CallPyFn(parse::PYTHON_MOD_PARSE_MODULE,
                                                    parse::PYTHON_MOD_CONVERT_TO_MS_COOTENSOR, input_args[i]);
    } else if (py::isinstance<py::tuple>(input_args[i])) {
      auto tuple_inp_arg = py::cast<py::tuple>(input_args[i]);
      py::tuple convert_tuple_arg(tuple_inp_arg.size());
      ConvertCTensorToPyTensor(tuple_inp_arg, &convert_tuple_arg);
      (*convert_args)[i] = convert_tuple_arg;
    } else {
      (*convert_args)[i] = input_args[i];
    }
  }
}

std::string ConvertPyObjToString(const py::object &obj) { return py::str(obj).cast<std::string>(); }
}  // namespace mindspore