/**
 * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ops/ops_func_impl/addn.h"

#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>
#include "mindapi/src/helper.h"
#include "mindspore/core/ops/array_ops.h"
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"

namespace mindspore {
namespace ops {
namespace {
// Special handling for the empty shape and shape {1}: both describe a single element.
inline bool ShapeHasSingleElement(const ShapeVector &shape) {
  return shape.empty() || (shape.size() == 1 && shape[0] == 1);
}

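// Example of the join semantics below, with kShapeDimAny shown as -1:
//   shape1 = {-1, 3}, shape2 = {2, -1}  ->  shape1 becomes {2, 3}, join succeeds
//   shape1 = {2, 3},  shape2 = {2, 4}   ->  join fails (static dims differ)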
// shape1 is the destination shape, shape2 is the source shape.
bool AddNDynShapeJoin(ShapeVector *shape1, const ShapeVector *shape2) {
  MS_EXCEPTION_IF_NULL(shape1);
  MS_EXCEPTION_IF_NULL(shape2);
  if (ShapeHasSingleElement(*shape1) && ShapeHasSingleElement(*shape2)) {
    return true;
  }
  // The shape sizes are not compatible.
  if (shape1->size() != shape2->size()) {
    MS_LOG(ERROR) << "Shape1 size:" << shape1->size() << ", Shape2 size:" << shape2->size();
    return false;
  }
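  // Join dimension by dimension: a static dimension overrides kShapeDimAny,
  // while two differing static dimensions make the join fail.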
  for (size_t i = 0; i < shape1->size(); ++i) {
    if ((*shape1)[i] == (*shape2)[i]) {
      continue;
    }
    // If shape1 is dynamic, use the dimension of shape2. If shape2 is dynamic, keep shape1.
    if ((*shape1)[i] == abstract::Shape::kShapeDimAny) {
      (*shape1)[i] = (*shape2)[i];
      continue;
    }
    if ((*shape2)[i] == abstract::Shape::kShapeDimAny) {
      continue;
    }
    // Both dimensions are static and differ, so the shapes cannot be joined.
    MS_LOG(ERROR) << "Shape1[" << i << "]:" << (*shape1)[i] << ", Shape2[" << i << "]:" << (*shape2)[i] << ".";
    return false;
  }
  return true;
}
}  // namespace

BaseShapePtr AddNFuncImpl::InferShape(const PrimitivePtr &primitive,
                                      const std::vector<AbstractBasePtr> &input_args) const {
  const auto &prim_name = primitive->name();
  AbstractBasePtrList elements = input_args;
  // When called from the backend, input_args[0] is a KernelTensor, not an AbstractSequence.
  if (input_args.size() == 1 && input_args[0]->isa<abstract::AbstractSequence>()) {
    elements = input_args[0]->cast<abstract::AbstractSequencePtr>()->elements();
  }
  (void)CheckAndConvertUtils::CheckInteger("input num", SizeToLong(elements.size()), kGreaterEqual, 1, prim_name);
  auto shape_0 = elements[0]->GetShape();
  ShapeVector output_shape;
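  // Fold every input's shape into output_shape; any dynamic-rank input
  // short-circuits to a dynamic-rank result.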
  for (size_t i = 0; i < elements.size(); ++i) {
    auto shape = elements[i]->GetShape();
    ShapeVector shape_vec;
    // If the element has no TensorShape, it is a scalar; use an empty shape vector as its shape.
    if (shape->isa<abstract::TensorShape>()) {
      shape_vec = shape->GetShapeVector();
    }
    // If any input has a dynamic rank, the output has a dynamic rank as well.
    if (IsDynamicRank(shape_vec)) {
      return std::make_shared<abstract::Shape>(ShapeVector({abstract::Shape::kShapeRankAny}));
    }
    // Record input0's shape.
    if (i == 0) {
      output_shape = shape_vec;
      continue;
    }
    // Join input[i] with input[0].
    if (!AddNDynShapeJoin(&output_shape, &shape_vec)) {
      MS_EXCEPTION(ValueError) << "For '" << prim_name << "', input shapes must be the same, but got shape of input["
                               << i << "]: " << shape->ToString() << ", shape of input[0]: " << shape_0->ToString()
                               << ".";
    }
  }
  return std::make_shared<abstract::Shape>(output_shape);
}

TypePtr AddNFuncImpl::InferType(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) const {
  MS_EXCEPTION_IF_NULL(primitive);
  const auto &prim_name = primitive->name();
  AbstractBasePtrList elements = input_args;
  // When called from the backend, input_args[0] is a KernelTensor, not an AbstractSequence.
  if (input_args.size() == 1 && input_args[0]->isa<abstract::AbstractSequence>()) {
    elements = input_args[0]->cast<abstract::AbstractSequencePtr>()->elements();
  }
  (void)CheckAndConvertUtils::CheckInteger("element num", SizeToLong(elements.size()), kGreaterEqual, 1, prim_name);
  std::map<std::string, TypePtr> types;
  (void)types.emplace("element_0", elements[0]->GetType());
  for (size_t i = 0; i < elements.size(); ++i) {
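    // An element whose abstract is still undetermined cannot be type-checked yet;
    // fall back to the first element's type.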
    if (elements[i]->IsSameTypeId(abstract::AbstractUndetermined::kTypeId)) {
      return elements[0]->GetType()->Clone();
    }
    std::string element_i = "element_" + std::to_string(i);
    (void)types.emplace(element_i, elements[i]->GetType());
  }
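  // Every input must be a tensor whose element type is in the supported set,
  // and all inputs must share the same type.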
  std::set<TypePtr> valid_types = common_valid_types_with_complex_and_bool;
  (void)CheckAndConvertUtils::CheckTensorTypeSame(types, valid_types, prim_name);
  return elements[0]->GetType()->Clone();
}

AbstractBasePtr AddNFuncImpl::AddNInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                        const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(primitive);
  auto prim_name = primitive->name();
  CheckAndConvertUtils::CheckInputArgs(input_args, kGreaterEqual, 1, prim_name);
  auto infer_type = InferType(primitive, input_args);
  auto infer_shape = InferShape(primitive, input_args);
  return abstract::MakeAbstract(infer_shape, infer_type);
}

}  // namespace ops
}  // namespace mindspore