/**
 * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ops/fusion/max_pool_fusion.h"

namespace mindspore {
namespace ops {
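// Initializes every attribute of the fused max-pool primitive in one call.
// Each setter below records its value as a primitive attribute, which the
// getters and shape/type inference read back later. Illustrative call (the
// argument values here are hypothetical, not from this file):
//   Init({1, 1, 2, 2}, {1, 1, 2, 2}, SAME, NCHW, {0, 0, 0, 0}, FLOOR, false, NO_ACTIVATION);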
void MaxPoolFusion::Init(const std::vector<int64_t> &kernel_size, const std::vector<int64_t> &stride,
                         const PadMode &pad_mode, const Format &format, const std::vector<int64_t> &pad,
                         const RoundMode &round_mode, const bool global, const ActivationType activation_type) {
  this->set_pad_mode(pad_mode);
  this->set_kernel_size(kernel_size);
  this->set_strides(stride);
  this->set_format(format);
  this->set_pad(pad);
  this->set_round_mode(round_mode);
  this->set_global(global);
  this->set_activation_type(activation_type);
}

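// When true, the kernel covers the whole spatial extent of the input (global pooling).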
void MaxPoolFusion::set_global(const bool global) { (void)AddAttr(kGlobal, MakeValue(global)); }

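// Attributes are stored as generic values, so the ActivationType enum is
// widened to int64_t before it is attached to the primitive.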
void MaxPoolFusion::set_activation_type(ActivationType activation_type) {
  int64_t swi = activation_type;
  (void)this->AddAttr(kActivationType, MakeValue(swi));
}

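// Reads the kGlobal attribute back; throws if it was never set.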
bool MaxPoolFusion::get_global() const {
  auto value_ptr = GetAttr(kGlobal);
  MS_EXCEPTION_IF_NULL(value_ptr);
  return GetValue<bool>(value_ptr);
}

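// Reads the kActivationType attribute back, narrowing the stored int64_t to the enum.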
ActivationType MaxPoolFusion::get_activation_type() const {
  auto value_ptr = GetAttr(kActivationType);
  MS_EXCEPTION_IF_NULL(value_ptr);
  return ActivationType(GetValue<int64_t>(value_ptr));
}

namespace {
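// Infers the output shape of the fused max pool. The input shape is
// normalized to NCHW, checked to be rank 4, and the output H/W are computed
// from the kernel size, strides, and pad mode attributes.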
abstract::ShapePtr InferShape(const PrimitivePtr &primitive, const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(primitive);
  MS_EXCEPTION_IF_NULL(input_args[0]);
  auto op_name = primitive->name();
  auto in_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->GetShapeTrack())[kShape];
  auto format = Format(GetValue<int64_t>(primitive->GetAttr(kFormat)));
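  // The shape arithmetic below assumes NCHW, so NHWC inputs are transposed first.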
  if (format == NHWC) {
    in_shape = {in_shape[0], in_shape[3], in_shape[1], in_shape[2]};
  }
  const int64_t in_shape_size = 4;
  (void)CheckAndConvertUtils::CheckInteger("x_rank", SizeToLong(in_shape.size()), kEqual, in_shape_size, op_name);
  auto kernel_size = GetValue<std::vector<int64_t>>(primitive->GetAttr(kKernelSize));
  auto pad_mode = PadMode(GetValue<int64_t>(primitive->GetAttr(kPadMode)));
  auto batch = in_shape[0];
  auto channel = in_shape[1];
  auto in_h = in_shape[2];
  auto in_w = in_shape[3];

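  // kernel_size and strides are also stored in NCHW layout, so the H and W
  // components sit at indices 2 and 3.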
  auto strides = GetValue<std::vector<int64_t>>(primitive->GetAttr(kStrides));
  auto kernel_h = kernel_size[2];
  auto kernel_w = kernel_size[3];
  auto stride_h = strides[2];
  auto stride_w = strides[3];
  int64_t out_h = abstract::Shape::SHP_ANY;
  int64_t out_w = abstract::Shape::SHP_ANY;
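  // VALID: out = ceil((in - (kernel - 1)) / stride); SAME: out = ceil(in / stride).
  // Any other pad mode leaves the spatial dimensions as SHP_ANY (unknown).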
  if (pad_mode == VALID) {
    out_h = static_cast<int64_t>(ceil((in_h - (kernel_h - 1)) / static_cast<float>(stride_h)));
    out_w = static_cast<int64_t>(ceil((in_w - (kernel_w - 1)) / static_cast<float>(stride_w)));
  } else if (pad_mode == SAME) {
    out_h = static_cast<int64_t>(ceil(in_h / static_cast<float>(stride_h)));
    out_w = static_cast<int64_t>(ceil(in_w / static_cast<float>(stride_w)));
  }
  std::vector<int64_t> out_shape = {batch, channel, out_h, out_w};
  if (format == NHWC) {
    out_shape = {batch, out_h, out_w, channel};
  }
  if (std::any_of(out_shape.begin(), out_shape.end(), [](int64_t a) { return a <= 0; })) {
    MS_LOG(EXCEPTION) << "For " << op_name << ", the inferred output shape has a non-positive "
                      << "dimension; kernel size or strides are invalid for the given input.";
  }
  return std::make_shared<abstract::Shape>(out_shape);
}

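// Max pooling does not change the element type: the output type is simply the
// type of the first input.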
TypePtr InferType(const std::vector<AbstractBasePtr> &input_args) { return input_args[0]->BuildType(); }
}  // namespace

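// Public inference entry point: null-checks the primitive and every input,
// then combines the inferred element type and shape into an abstract tensor.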
AbstractBasePtr MaxPoolFusionInfer(const abstract::AnalysisEnginePtr &, const PrimitivePtr &primitive,
                                   const std::vector<AbstractBasePtr> &input_args) {
  MS_EXCEPTION_IF_NULL(primitive);
  for (const auto &item : input_args) {
    MS_EXCEPTION_IF_NULL(item);
  }
  return std::make_shared<abstract::AbstractTensor>(InferType(input_args), InferShape(primitive, input_args));
}
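// Registers the primitive so it can be looked up by name (kNameMaxPoolFusion).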
REGISTER_PRIMITIVE_C(kNameMaxPoolFusion, MaxPoolFusion);
}  // namespace ops
}  // namespace mindspore