/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "nnacl/infer/pooling_grad_infer.h"
#include <math.h>
#include "nnacl/infer/infer_register.h"

int PoolingGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
                          OpParameter *parameter) {
  int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 3, 1);
  if (check_ret != NNACL_OK) {
    return check_ret;
  }

  const TensorC *input = inputs[0];
  if (input->format_ != Format_NHWC) {
    return NNACL_FORMAT_ERROR;
  }
32   int input_h = input->shape_[1];
33   int input_w = input->shape_[2];
34   if (input->shape_size_ != 4) {
35     return NNACL_INPUT_TENSOR_ERROR;
36   }
  PoolingParameter *param = (PoolingParameter *)parameter;
  int window_h = param->window_h_;
  int window_w = param->window_w_;
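  /* Global pooling uses the whole spatial plane as the pooling window. */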
  if (param->global_) {
    window_h = input_h;
    window_w = input_w;
  }

  if (param->stride_h_ == 0 || param->stride_w_ == 0) {
    return NNACL_PARAM_INVALID;
  }
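  /* SAME padding: the output spans ceil(input / stride) positions, and the
   * total padding is whatever the last window needs to stay inside the
   * padded input, split as evenly as possible between the two sides
   * (e.g. input 7, stride 2, window 3 -> output 4, pad 1 on each side). */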
  if (param->pad_mode_ == Pad_same) {
    NNACL_CHECK_ZERO_RETURN_ERR(param->stride_w_);
    NNACL_CHECK_ZERO_RETURN_ERR(param->stride_h_);
    int output_w = ceil((float)(input_w) / (float)(param->stride_w_));
    int output_h = ceil((float)(input_h) / (float)(param->stride_h_));
    int pad_h_all = ((output_h - 1) * param->stride_h_ + (window_h - 1) + 1 - input_h);
    int pad_w_all = ((output_w - 1) * param->stride_w_ + (window_w - 1) + 1 - input_w);
    if (pad_h_all < 0) {
      param->pad_u_ = param->pad_d_ = 0;
    } else {
      param->pad_u_ = pad_h_all / 2;
      param->pad_d_ = pad_h_all - param->pad_u_;
    }
    if (pad_w_all < 0) {
      param->pad_l_ = param->pad_r_ = 0;
    } else {
      param->pad_l_ = pad_w_all / 2;
      param->pad_r_ = pad_w_all - param->pad_l_;
    }
  }
  SetDataTypeFormat(outputs[0], input);
  SetShapeTensor(outputs[0], input);
  return NNACL_OK;
}

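/* Both pooling gradients share this inference: the single output takes the
 * shape, data type and format of the first input tensor. */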
REG_INFER(AvgPoolGrad, PrimType_AvgPoolGrad, PoolingGradInferShape)
REG_INFER(MaxPoolGrad, PrimType_MaxPoolGrad, PoolingGradInferShape)