• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * Copyright 2024 Huawei Technologies Co., Ltd
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
#include "ops/ops_func_impl/correlate.h"
#include <algorithm>
#include <memory>
#include "ops/op_utils.h"
#include "utils/check_convert_utils.h"
21 
22 namespace mindspore {
23 namespace ops {
// NOTE(review): intentionally empty no-op — looks like an init/registration stub; confirm nothing was meant to go here.
void InitArray() {}
InferShape(const PrimitivePtr & primitive,const std::vector<AbstractBasePtr> & input_args) const25 BaseShapePtr CorrelateFuncImpl::InferShape(const PrimitivePtr &primitive,
26                                            const std::vector<AbstractBasePtr> &input_args) const {
27   auto input_a_shape_ptr = input_args[kIndex0]->GetShape();
28   auto input_v_shape_ptr = input_args[kIndex1]->GetShape();
29   auto input_a_shape = input_a_shape_ptr->GetShapeVector();
30   auto input_v_shape = input_v_shape_ptr->GetShapeVector();
31   if (IsDynamic(input_a_shape) || IsDynamic(input_v_shape)) {
32     return std::make_shared<abstract::Shape>(ShapeVector{abstract::Shape::kShapeDimAny});
33   }
34 
35   auto a_rank = input_a_shape.size();
36   auto v_rank = input_v_shape.size();
37   if (a_rank != 1 || v_rank != 1) {
38     MS_EXCEPTION(ValueError) << "'" << primitive->name() << "' only support 1-dimensional inputs , but got a at "
39                              << a_rank << "-dimensional and got v at " << v_rank << "-dimensional";
40   }
41 
42   int a_len = input_a_shape[0];
43   int v_len = input_v_shape[0];
44   if (a_len == 0 || v_len == 0) {
45     MS_EXCEPTION(ValueError) << "all inputs of '" << primitive->name() << "' cannot be empty , got a at ( " << a_len
46                              << ") and got v at (" << v_len << ")";
47   }
48 
49   int long_len = a_len < v_len ? v_len : a_len;
50   int short_len = a_len < v_len ? a_len : v_len;
51   int out_len = 0;
52   auto mode_v = GetScalarValue<int64_t>(input_args[kInputIndex2]->GetValue());
53   mindspore::PadMode mode_type = static_cast<mindspore::PadMode>(mode_v.value_or(-1));
54   if (mode_type == mindspore::PadMode::VALID)
55     out_len = long_len - short_len + 1;
56   else if (mode_type == mindspore::PadMode::SAME)
57     out_len = long_len;
58   else if (mode_type == mindspore::PadMode::FULL)
59     out_len = long_len + short_len - 1;
60   else
61     MS_EXCEPTION(ValueError) << "For '" << primitive->name()
62                              << "', the mode should be one of [valid, same, full], but got " << mode_type;
63   return std::make_shared<abstract::Shape>(ShapeVector{out_len});
64 }
65 
InferType(const PrimitivePtr & primitive,const std::vector<AbstractBasePtr> & input_args) const66 TypePtr CorrelateFuncImpl::InferType(const PrimitivePtr &primitive,
67                                      const std::vector<AbstractBasePtr> &input_args) const {
68   MS_EXCEPTION_IF_NULL(input_args[kInputIndex0]);
69   auto input_a_type = input_args[kInputIndex0]->GetType();
70   auto input_a_type_id = input_a_type->cast<TensorTypePtr>()->element()->type_id();
71   auto input_v_type = input_args[kInputIndex1]->GetType();
72   auto input_v_type_id = input_v_type->cast<TensorTypePtr>()->element()->type_id();
73   if (input_a_type_id != input_v_type_id) {
74     MS_EXCEPTION(TypeError) << "For '" << primitive->name()
75                             << "' the type of a and v must be same, but got type of a is different from that of v! ";
76   }
77 
78   static const std::vector<TypeId> type_to_float32 = {
79     kNumberTypeInt8,
80     kNumberTypeInt16,
81     kNumberTypeInt32,
82   };
83   static const std::vector<TypeId> type_to_float64 = {kNumberTypeInt64};
84   bool is_type_to_float32 =
85     std::any_of(type_to_float32.begin(), type_to_float32.end(),
86                 [&input_a_type_id](const TypeId &type_id) { return input_a_type_id == type_id; });
87   bool is_type_to_float64 =
88     std::any_of(type_to_float64.begin(), type_to_float64.end(),
89                 [&input_a_type_id](const TypeId &type_id) { return input_a_type_id == type_id; });
90 
91   if (is_type_to_float32) {
92     return std::make_shared<TensorType>(kFloat32);
93   } else if (is_type_to_float64) {
94     return std::make_shared<TensorType>(kFloat64);
95   } else {
96     return input_a_type->Clone();
97   }
98 }
99 
100 }  // namespace ops
101 }  // namespace mindspore
102