/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "utils/convert_utils_py.h"

#include <vector>
#include <string>
#include <memory>
#include <algorithm>
#include <list>
#include <utility>
#include <cfloat>

#include "abstract/abstract_value.h"
#include "abstract/utils.h"
#include "pipeline/jit/parse/parse.h"
#include "pipeline/jit/parse/parse_base.h"
#include "pipeline/jit/parse/resolve.h"
#include "ir/value.h"
#include "ir/tensor.h"
#include "ir/param_info.h"
#include "pybind_api/ir/base_ref_py.h"
#include "utils/ms_context.h"

namespace mindspore {
py::object BuiltinsToPyData(const Any &value);
py::object BuiltinsToPyData(const BaseRef &value);
py::object VectorToPyData(const Any &value);
py::object VectorRefToPyData(const VectorRef &value);

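// Wrap a Tensor into a Python object. If the tensor data is still being produced asynchronously,
// release the GIL and wait for it to be ready before handing it to Python.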
py::object TensorToPyData(const tensor::TensorPtr &tensor) {
  MS_EXCEPTION_IF_NULL(tensor);
  if (tensor->NeedWait()) {
    py::gil_scoped_release release;
    tensor->Wait();
  }
  py::tuple v(1);
  v[0] = tensor;
  return v[0];
}

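// Convert a Scalar value into the corresponding Python int/float/bool object.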
py::object ScalarPtrToPyData(const ScalarPtr &value) {
  py::int_ int_v;
  py::float_ float_v;
  py::bool_ bool_v;
  TypeId scalar_type = value->type()->type_id();
  switch (scalar_type) {
    case kNumberTypeUInt8:
      MS_LOG(DEBUG) << "uint8";
      int_v = value->cast<UInt8ImmPtr>()->value();
      return std::move(int_v);
    case kNumberTypeUInt16:
      MS_LOG(DEBUG) << "uint16";
      int_v = value->cast<UInt16ImmPtr>()->value();
      return std::move(int_v);
    case kNumberTypeUInt32:
      MS_LOG(DEBUG) << "uint32";
      int_v = value->cast<UInt32ImmPtr>()->value();
      return std::move(int_v);
    case kNumberTypeUInt64:
      MS_LOG(DEBUG) << "uint64";
      int_v = value->cast<UInt64ImmPtr>()->value();
      return std::move(int_v);
    case kNumberTypeInt8:
      MS_LOG(DEBUG) << "int8";
      int_v = value->cast<Int8ImmPtr>()->value();
      return std::move(int_v);
    case kNumberTypeInt16:
      MS_LOG(DEBUG) << "int16";
      int_v = value->cast<Int16ImmPtr>()->value();
      return std::move(int_v);
    case kNumberTypeInt32:
      MS_LOG(DEBUG) << "int32";
      int_v = value->cast<Int32ImmPtr>()->value();
      return std::move(int_v);
    case kNumberTypeInt64:
      MS_LOG(DEBUG) << "int64";
      int_v = value->cast<Int64ImmPtr>()->value();
      return std::move(int_v);
    case kNumberTypeFloat32:
      MS_LOG(DEBUG) << "float";
      float_v = value->cast<FP32ImmPtr>()->value();
      return std::move(float_v);
    case kNumberTypeFloat64:
      MS_LOG(DEBUG) << "double";
      float_v = value->cast<FP64ImmPtr>()->value();
      return std::move(float_v);
    case kNumberTypeBool:
      MS_LOG(DEBUG) << "bool";
      bool_v = value->cast<BoolImmPtr>()->value();
      return std::move(bool_v);
    default:
      MS_EXCEPTION(TypeError) << "Unsupported scalar converted to py data: " << value->ToString();
  }
}

using ConverterFunction = std::function<py::object(const ValuePtr &value)>;
using ValueNameToConverterVector = std::vector<std::pair<const char *, ConverterFunction>>;

// (Value Type Name) -> (Converter Function)
// The converter function is used to convert a Value object to a Python data object.
static ValueNameToConverterVector value_name_to_converter = {
  // Scalar
  {typeid(Scalar).name(),
   [](const ValuePtr &value) -> py::object { return ScalarPtrToPyData(value->cast<ScalarPtr>()); }},
  // Tensor
  {typeid(tensor::Tensor).name(),
   [](const ValuePtr &value) -> py::object {
     auto tensor_ptr = value->cast<tensor::TensorPtr>();
     return TensorToPyData(tensor_ptr);
   }},
  // MetaTensor
  {typeid(tensor::MetaTensor).name(),
   [](const ValuePtr &value) -> py::object {
     py::tuple tuple_container(1);
     tuple_container[0] = value->cast<tensor::MetaTensorPtr>();
     return tuple_container[0];
   }},
  // RefKey
  {typeid(RefKey).name(),
   [](const ValuePtr &value) -> py::object {
     py::tuple tuple_container(1);
     tuple_container[0] = value->cast<RefKeyPtr>();
     return tuple_container[0];
   }},
  // Type
  {typeid(Type).name(),
   [](const ValuePtr &value) -> py::object {
     py::tuple tuple_container(1);
     tuple_container[0] = value->cast<TypePtr>();
     return tuple_container[0];
   }},
  // StringImm
  {typeid(StringImm).name(),
   [](const ValuePtr &value) -> py::object {
     py::str res = value->cast<StringImmPtr>()->value();
     return res;
   }},
  // ValueSequeue
  {typeid(ValueSequeue).name(),
   [](const ValuePtr &value) -> py::object {
     auto value_sequeue = value->cast<ValueSequeuePtr>()->value();
     py::tuple res_sequeue(value_sequeue.size());
     for (size_t i = 0; i < value_sequeue.size(); i++) {
       res_sequeue[i] = ValueToPyData(value_sequeue[i]);
     }
     if (value->isa<ValueTuple>()) {
       return res_sequeue;
     }
     return res_sequeue.cast<py::list>();
   }},
  // ValueDictionary
  {typeid(ValueDictionary).name(),
   [](const ValuePtr &value) -> py::object {
     auto value_list = value->cast<ValueDictionaryPtr>()->value();
     py::dict res_dict;
     for (const auto &value : value_list) {
       res_dict[py::str(value.first)] = ValueToPyData(value.second);
     }
     return res_dict;
   }},
  // ValueSlice
  {typeid(ValueSlice).name(),
   [](const ValuePtr &value) -> py::object {
     auto slice = value->cast<ValueSlicePtr>();
     auto start = ValueToPyData(slice->start());
     auto end = ValueToPyData(slice->stop());
     auto step = ValueToPyData(slice->step());
     return parse::python_adapter::CallPyFn(parse::PYTHON_MOD_PARSE_MODULE, parse::PYTHON_PARSE_CLASS_SLICE, start,
                                            end, step);
   }},
  // KeywordArg
  {typeid(KeywordArg).name(),
   [](const ValuePtr &value) -> py::object {
     auto abs_keyword_arg = value->ToAbstract()->cast<abstract::AbstractKeywordArgPtr>();
     auto key = abs_keyword_arg->get_key();
     auto val = abs_keyword_arg->get_arg()->BuildValue();
     auto py_value = ValueToPyData(val);
     auto kwargs = py::kwargs();
     kwargs[key.c_str()] = py_value;
     return kwargs;
   }},
  // parse::NameSpace
  {typeid(parse::NameSpace).name(),
   [](const ValuePtr &value) -> py::object {
     auto ns = value->cast<parse::NameSpacePtr>();
     return ns->module_obj();
   }},
  // parse::ClassType
  {typeid(parse::ClassType).name(),
   [](const ValuePtr &value) -> py::object {
     auto class_type = value->cast<parse::ClassTypePtr>();
     return class_type->obj();
   }},
  // None
  {typeid(None).name(), [](const ValuePtr &value) -> py::object { return py::none(); }},
  // AnyValue
  {typeid(AnyValue).name(), [](const ValuePtr &value) -> py::object { return py::none(); }},
  // FuncGraph
  {typeid(FuncGraph).name(), [](const ValuePtr &value) -> py::object { return py::none(); }},
  // Monad
  {typeid(Monad).name(), [](const ValuePtr &value) -> py::object { return py::none(); }},
  // Ellipsis
  {typeid(Ellipsis).name(), [](const ValuePtr &value) -> py::object { return py::ellipsis(); }}};

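// Convert a Value object to a Python object by dispatching on its type id through the converter
// table above.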
py::object ValueToPyData(const ValuePtr &value) {
  if (value == nullptr) {
    MS_LOG(EXCEPTION) << "The `value` should not be null";
  }
  for (auto &iter : value_name_to_converter) {
    if (value->IsFromTypeId(Base::GetTypeId(iter.first))) {
      return iter.second(value);
    }
  }
  MS_LOG(EXCEPTION) << "Unable to convert " << value->ToString() << "[" << value->type_name() << "] to Python data";
}

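// Convert an Any container (builtin scalar, ValuePtr, Tensor, py::object, vector/list of Any, or
// TypePtr) into a Python object.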
py::object AnyToPyData(const Any &value) {
  py::object ret;
  MS_LOG(DEBUG) << "AnyToPyData " << value.GetString();
  if (value.is<int>() || value.is<float>() || value.is<double>() || value.is<bool>()) {
    ret = BuiltinsToPyData(value);
  } else if (value.is<ValuePtr>()) {
    MS_LOG(DEBUG) << "ValuePtr";
    ValuePtr v = value.cast<ValuePtr>();
    ret = ValueToPyData(v);
  } else if (value.is<tensor::TensorPtr>()) {
    MS_LOG(DEBUG) << "tensor";
    auto tensor_ptr = value.cast<tensor::TensorPtr>();
    ret = TensorToPyData(tensor_ptr);
  } else if (value.is<py::object>()) {
    MS_LOG(DEBUG) << "py obj";
    ret = value.cast<py::object>();
  } else if (value.is<std::vector<tensor::TensorPtr>>() || value.is<std::vector<Any>>()) {
    ret = VectorToPyData(value);
  } else if (value.is<std::list<Any>>()) {
    MS_LOG(DEBUG) << "list_any";
    auto value_list = value.cast<std::list<Any>>();
    py::list rets = py::list();
    for (auto &v : value_list) {
      rets.append(AnyToPyData(v));
    }
    ret = rets;
  } else if (value.is<std::vector<Any>>()) {
    auto value_list = value.cast<std::vector<Any>>();
    py::tuple rets(value_list.size());
    for (size_t i = 0; i < value_list.size(); i++) {
      rets[i] = AnyToPyData(value_list[i]);
    }
    ret = rets;
  } else if (value.is<TypePtr>()) {
    py::tuple v(1);
    v[0] = value.cast<TypePtr>();
    ret = v[0];
  } else {
    MS_LOG(EXCEPTION) << "value is not a supported type";
  }
  return ret;
}

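// Convert a BaseRef (builtin scalar, ValuePtr, Tensor, PyObjectRef, VectorRef, or TypePtr) into a
// Python object.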
py::object BaseRefToPyData(const BaseRef &value) {
  py::object ret;
  MS_LOG(DEBUG) << "BaseRefToPyData " << value.ToString();
  if (utils::isa<int>(value) || utils::isa<float>(value) || utils::isa<double>(value) || utils::isa<bool>(value)) {
    ret = BuiltinsToPyData(value);
  } else if (utils::isa<ValuePtr>(value)) {
    MS_LOG(DEBUG) << "ValuePtr";
    ValuePtr v = utils::cast<ValuePtr>(value);
    ret = ValueToPyData(v);
  } else if (utils::isa<tensor::TensorPtr>(value)) {
    MS_LOG(DEBUG) << "tensor";
    auto tensor_ptr = utils::cast<tensor::TensorPtr>(value);
    ret = TensorToPyData(tensor_ptr);
  } else if (utils::isa<PyObjectRef>(value)) {
    MS_LOG(DEBUG) << "py obj";
    PyObjectRef py_ref = utils::cast<PyObjectRef>(value);
    ret = py_ref.object_;
  } else if (utils::isa<VectorRef>(value)) {
    auto vec_ref = utils::cast<VectorRef>(value);
    ret = VectorRefToPyData(vec_ref);
  } else if (utils::isa<TypePtr>(value)) {
    py::tuple v(1);
    v[0] = utils::cast<TypePtr>(value);
    ret = v[0];
  } else {
    MS_LOG(EXCEPTION) << "value is not a supported type";
  }
  return ret;
}

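// Convert a builtin C++ scalar held in an Any (and, in the overload below, in a BaseRef) to the
// matching Python scalar object.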
py::object BuiltinsToPyData(const Any &value) {
  if (value.is<int>()) {
    MS_LOG(DEBUG) << "int";
    py::int_ ret = value.cast<int>();
    return std::move(ret);
  } else if (value.is<float>()) {
    MS_LOG(DEBUG) << "float";
    py::float_ ret = value.cast<float>();
    return std::move(ret);
  } else if (value.is<double>()) {
    MS_LOG(DEBUG) << "double";
    py::float_ ret = value.cast<double>();
    return std::move(ret);
  } else {
    MS_LOG(DEBUG) << "bool";
    py::bool_ ret = value.cast<bool>();
    return std::move(ret);
  }
}

py::object BuiltinsToPyData(const BaseRef &value) {
  if (utils::isa<int>(value)) {
    MS_LOG(DEBUG) << "int";
    py::int_ ret = utils::cast<int>(value);
    return std::move(ret);
  } else if (utils::isa<float>(value)) {
    MS_LOG(DEBUG) << "float";
    py::float_ ret = utils::cast<float>(value);
    return std::move(ret);
  } else if (utils::isa<double>(value)) {
    MS_LOG(DEBUG) << "double";
    py::float_ ret = utils::cast<double>(value);
    return std::move(ret);
  } else {
    MS_LOG(DEBUG) << "bool";
    py::bool_ ret = utils::cast<bool>(value);
    return std::move(ret);
  }
}

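// Convert a vector of Tensors or a vector of Any into a Python tuple.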
py::object VectorToPyData(const Any &value) {
  py::object ret;
  if (value.is<std::vector<tensor::TensorPtr>>()) {
    MS_LOG(DEBUG) << "vector_tensor";
    std::vector<tensor::TensorPtr> outputs;
    outputs = value.cast<std::vector<tensor::TensorPtr>>();
    py::tuple tensor_tuple(outputs.size());
    for (std::size_t i = 0; i < outputs.size(); ++i) {
      tensor_tuple[i] = *outputs[i];
    }
    ret = tensor_tuple;
  } else {
    MS_LOG(DEBUG) << "vector_any";
    auto value_list = value.cast<std::vector<Any>>();
    py::tuple any_tuple = py::tuple(value_list.size());
    size_t i = 0;
    for (auto &v : value_list) {
      any_tuple[i] = AnyToPyData(v);
      i++;
    }
    ret = any_tuple;
  }
  return ret;
}

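// Convert a VectorRef into a Python tuple, converting each element recursively.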
py::object VectorRefToPyData(const VectorRef &value_list) {
  py::object ret;
  MS_LOG(DEBUG) << "vector_ref";
  size_t value_size = value_list.size();
  auto ref_tuple = py::tuple(value_size);
  for (size_t i = 0; i < value_size; i++) {
    ref_tuple[i] = BaseRefToPyData(value_list[i]);
  }
  ret = ref_tuple;
  return ret;
}

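// Copy the optional min/max value range from the Python inference output onto the abstract tensor,
// if both bounds are present.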
void SetValueRange(const AbstractBasePtr &tensor, const py::object &output) {
  if (output.is_none()) {
    return;
  }
  py::object obj_min =
    output.contains(py::str(ATTR_MIN_VALUE)) ? (py::object)output[ATTR_MIN_VALUE] : (py::object)py::none();
  py::object obj_max =
    output.contains(py::str(ATTR_MAX_VALUE)) ? (py::object)output[ATTR_MAX_VALUE] : (py::object)py::none();

  if (!obj_min.is_none() && !obj_max.is_none()) {
    bool converted = true;
    ValuePtr min_value = nullptr;
    ValuePtr max_value = nullptr;
    converted = parse::ConvertData(obj_min, &min_value);
    if (!converted) {
      MS_LOG(EXCEPTION) << "Convert shape min value data failed";
    }
    converted = parse::ConvertData(obj_max, &max_value);
    if (!converted) {
      MS_LOG(EXCEPTION) << "Convert shape max value data failed";
    }
    auto abs_tensor = dyn_cast<abstract::AbstractTensor>(tensor);
    abs_tensor->set_value_range(min_value, max_value);
  }
}

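// Build an AbstractTensor from the inferred shape and type, including the optional min/max shapes
// used for dynamic-shape outputs.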
AbstractBasePtr MakePyInferRes2AbstractTensor(const py::object &shape_obj, const py::object &type_obj,
                                              const py::object &output) {
  auto ret_vec = shape_obj.cast<ShapeVector>();
  auto ret_dtype = type_obj.cast<TypePtr>();
  ShapeVector min_shape_vec;
  ShapeVector max_shape_vec;

  if (!output.is_none()) {
    py::object min_shape =
      output.contains(py::str(ATTR_MIN_SHAPE)) ? (py::object)output[ATTR_MIN_SHAPE] : (py::object)py::none();
    py::object max_shape =
      output.contains(py::str(ATTR_MAX_SHAPE)) ? (py::object)output[ATTR_MAX_SHAPE] : (py::object)py::none();
    if (!min_shape.is_none()) {
      min_shape_vec = min_shape.cast<ShapeVector>();
    }
    if (!max_shape.is_none()) {
      max_shape_vec = max_shape.cast<ShapeVector>();
    }
  }

  auto ret_shape = std::make_shared<abstract::Shape>(ret_vec, min_shape_vec, max_shape_vec);
  AbstractBasePtr tensor = MakeAbstractTensor(ret_shape, ret_dtype);

  SetValueRange(tensor, output);
  return tensor;
}

static bool IsMonadType(const py::object &type_obj) {
  if (py::isinstance<Type>(type_obj)) {
    auto type = type_obj.cast<Type *>();
    return type->isa<MonadType>();
  }
  return false;
}

static AbstractBasePtr ToMonadAbstract(const py::object &type_obj) {
  if (py::isinstance<Type>(type_obj)) {
    auto type = type_obj.cast<Type *>();
    if (!type->isa<MonadType>()) {
      MS_LOG(EXCEPTION) << "Not a monad type object: " << py::str(type_obj);
    }
    return abstract::MakeMonadAbstract(type->cast<MonadTypePtr>());
  }
  MS_LOG(EXCEPTION) << "Not a type object: " << py::str(type_obj);
}

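// Build an abstract value from a Python inference result: a scalar or tensor abstract for a single
// shape/type pair, a tuple/list abstract for nested results, AbstractNone when there is no output,
// and a monad abstract for monad types.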
AbstractBasePtr MakePyInferRes2Abstract(const py::object &shape_obj, const py::object &type_obj,
                                        const py::object &output) {
  if ((py::isinstance<py::list>(shape_obj) || py::isinstance<py::tuple>(shape_obj)) && py::isinstance<Type>(type_obj)) {
    auto ret_vec = shape_obj.cast<ShapeVector>();
    auto ret_dtype = type_obj.cast<TypePtr>();
    MS_EXCEPTION_IF_NULL(ret_dtype);
    // If the shape list is empty, return a scalar abstract.
    if (ret_vec.empty() && (!ret_dtype->isa<TensorType>())) {
      abstract::AbstractScalarPtr abs_scalar = std::make_shared<abstract::AbstractScalar>(kAnyValue, ret_dtype);
      return abs_scalar;
    }
    return MakePyInferRes2AbstractTensor(shape_obj, type_obj, output);
  } else if (py::isinstance<py::tuple>(shape_obj) && py::isinstance<py::tuple>(type_obj)) {
    auto shape_tuple = shape_obj.cast<py::tuple>();
    auto typeid_tuple = type_obj.cast<py::tuple>();
    AbstractBasePtrList ptr_list;
    for (size_t it = 0; it < shape_tuple.size(); ++it) {
      auto tensor_it = MakePyInferRes2Abstract(shape_tuple[it], typeid_tuple[it]);
      ptr_list.push_back(tensor_it);
    }
    auto tuple = std::make_shared<abstract::AbstractTuple>(ptr_list);
    return tuple;
  } else if (py::isinstance<py::list>(shape_obj) && py::isinstance<py::list>(type_obj)) {
    auto shape_list = shape_obj.cast<py::list>();
    auto typeid_list = type_obj.cast<py::list>();
    AbstractBasePtrList ptr_list;
    for (size_t it = 0; it < shape_list.size(); ++it) {
      auto tensor_it = MakePyInferRes2Abstract(shape_list[it], typeid_list[it]);
      ptr_list.push_back(tensor_it);
    }
    auto list = std::make_shared<abstract::AbstractList>(ptr_list);
    return list;
  } else if (shape_obj.is_none() && type_obj.is_none()) {
    // AbstractNone indicates there is no output for this CNode.
    auto abstract_none = std::make_shared<abstract::AbstractNone>();
    return abstract_none;
  } else if (IsMonadType(type_obj)) {
    // Return a monad abstract if it is a monad type.
    return ToMonadAbstract(type_obj);
  } else {
    // When sparse is enabled, an undetermined abstract might be raised and later eliminated in opt passes.
    auto context = MsContext::GetInstance();
    MS_EXCEPTION_IF_NULL(context);
    bool enable_sparse = context->get_param<bool>(MS_CTX_ENABLE_SPARSE);
    if (enable_sparse) {
      return std::make_shared<abstract::AbstractUndetermined>();
    }
    MS_LOG(EXCEPTION) << "Python evaluator returned an invalid shape or type: " << (std::string)py::str(type_obj);
  }
}
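
// If the graph output is a constant or a parameter, set *ret_val directly and return true so the
// caller can skip executing the graph.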
bool IsGraphOutputValueNodeOrParameter(const AnfNodePtr &output, const py::tuple &args,
                                       const std::shared_ptr<py::object> &ret_val) {
  if (output->isa<ValueNode>()) {
    MS_LOG(INFO) << "Graph's output is a constant. No need to execute.";
    ValuePtr value = GetValueNode(output);
    *ret_val = ValueToPyData(value);
    return true;
  }

  // The adapter transforms values in __init__() and construct() into parameters, so the number of
  // inputs (i.e. the args of the current function) can be smaller than the number of parameters.
  if (output->isa<Parameter>()) {
    MS_LOG(INFO) << "Graph's output is a parameter. If all params are inputs, no need to execute.";
    // Find the right parameter as ret_val.
    auto func_graph = output->func_graph();
    MS_EXCEPTION_IF_NULL(func_graph);
    auto params = func_graph->parameters();
    if ((args.size() + func_graph->hyper_param_count()) != params.size()) {
      MS_LOG(EXCEPTION) << "Input size " << args.size() << " plus parameter count " << func_graph->hyper_param_count()
                        << " is not equal to graph input size " << params.size() << ", let the graph be executed.";
    }

    auto it = std::find(params.begin(), params.end(), output);
    if (it == params.end()) {
      MS_EXCEPTION(UnknownError) << "When the graph output is a Parameter, it should be found in the graph parameters";
    }
    size_t index = it - params.cbegin();
    if (index >= args.size() + func_graph->hyper_param_count()) {
      MS_EXCEPTION(UnknownError) << "Index " << index << " is equal to or larger than args size " << args.size()
                                 << " plus parameter count " << func_graph->hyper_param_count() << ".";
    }
    if (index < args.size()) {
      *ret_val = args[index];
    } else {
      auto param = dyn_cast<Parameter>(params[index]);
      MS_EXCEPTION_IF_NULL(param);
      if (!param->has_default()) {
        MS_LOG(EXCEPTION) << "Cannot determine the value of Parameter " << index << " (" << param->name() << ")";
      }
      auto tensor = param->default_param();
      *ret_val = py::cast(tensor);
    }
    return true;
  }
  return false;
}
}  // namespace mindspore