/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "nnacl/infer/arithmetic_grad_infer.h"
#include "nnacl/arithmetic_parameter.h"
#include "nnacl/infer/infer_register.h"
#include "nnacl/tensor_c_utils.h"

/*
 * The arithmetic grad ops include AddGrad, SubGrad, MulGrad, DivGrad, MaximumGrad and MinimumGrad,
 * as currently listed in arithmetic_fp32.h.
 * MaximumGrad and MinimumGrad run through MaximumGradInfershape,
 * AddGrad and SubGrad run through AddSubGradInfershape,
 * and the remaining ops run through this function.
 */
int ArithmeticGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
                             OpParameter *parameter) {
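  // Expect exactly 3 non-null inputs (dy, x1, x2) and 2 non-null outputs (dx1, dx2).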
  int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 3, 2);
  if (check_ret != NNACL_OK) {
    return check_ret;
  }

  const TensorC *dy = inputs[0];
  const TensorC *x1 = inputs[1];
  const TensorC *x2 = inputs[2];
  TensorC *dx1 = outputs[0];
  TensorC *dx2 = outputs[1];

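  // Reject tensors whose rank exceeds MAX_SHAPE_SIZE; the local shape buffers below are fixed-size.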
  if (dy->shape_size_ > MAX_SHAPE_SIZE || x1->shape_size_ > MAX_SHAPE_SIZE || x2->shape_size_ > MAX_SHAPE_SIZE) {
    return NNACL_INPUT_TENSOR_ERROR;
  }
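  // Copy the shapes of x1, x2 and dy into local working buffers before aligning them for broadcasting.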
  int in_shape0[MAX_SHAPE_SIZE] = {0};
  size_t in_shape0_size = 0;
  ShapeSet(in_shape0, &in_shape0_size, x1->shape_, x1->shape_size_);
  int in_shape1[MAX_SHAPE_SIZE] = {0};
  size_t in_shape1_size = 0;
  ShapeSet(in_shape1, &in_shape1_size, x2->shape_, x2->shape_size_);
  int out_shape[MAX_SHAPE_SIZE] = {0};
  size_t out_shape_size = 0;
  ShapeSet(out_shape, &out_shape_size, dy->shape_, dy->shape_size_);

  ArithmeticParameter *param = (ArithmeticParameter *)parameter;

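  // Compare the element counts of the two gradients: the smaller operand was broadcast in the
  // forward op, so its shape is left-padded with 1s to match the rank of the larger one.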
  if (GetElementNum(dx1) < GetElementNum(dx2)) {
    param->ndim_ = in_shape1_size;
    param->in_elements_num0_ = (int)param->ndim_;
    param->in_elements_num1_ = (int)param->ndim_;
    param->out_elements_num_ = (int)param->ndim_;
    size_t fill_dim_num = in_shape1_size - in_shape0_size;  // This will not work for batch!
    int j = 0;
    for (unsigned int i = 0; i < in_shape1_size; i++) {
      if (i < fill_dim_num) {
        param->in_shape1_[i] = 1;
      } else {
        param->in_shape1_[i] = in_shape0[j++];
      }
      param->in_shape0_[i] = in_shape1[i];
      param->out_shape_[i] = out_shape[i];
    }
  } else if (GetElementNum(dx2) < GetElementNum(dx1)) {
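    // Mirrored case: dx2 is the smaller gradient, so x2's shape is the one padded with leading 1s.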
    param->ndim_ = in_shape0_size;
    param->in_elements_num0_ = (int)param->ndim_;
    param->in_elements_num1_ = (int)param->ndim_;
    param->out_elements_num_ = (int)param->ndim_;
    param->broadcasting_ = true;
    int j = 0;
    size_t fill_dim_num = in_shape0_size - in_shape1_size;
    for (unsigned int i = 0; i < in_shape0_size; i++) {
      if (i < fill_dim_num) {
        param->in_shape1_[i] = 1;
      } else {
        param->in_shape1_[i] = in_shape1[j++];
      }
      param->in_shape0_[i] = in_shape0[i];
      param->out_shape_[i] = out_shape[i];
    }
  } else {
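    // Equal element counts: copy the shapes through unchanged, no broadcasting is required.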
    param->broadcasting_ = false;
    for (unsigned int i = 0; i < in_shape0_size; i++) {
      param->in_shape1_[i] = in_shape1[i];
      param->in_shape0_[i] = in_shape0[i];
      param->out_shape_[i] = out_shape[i];
    }
  }

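  // Each gradient takes the shape of its corresponding forward input and the data type of dy.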
  SetShapeTensor(dx1, x1);
  SetShapeTensor(dx2, x2);
  dx1->data_type_ = dy->data_type_;
  dx2->data_type_ = dy->data_type_;
  return NNACL_OK;
}

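// Only DivGrad and MulGrad are registered to this infer function; AddGrad/SubGrad and
// MaximumGrad/MinimumGrad use their own infer functions, as noted in the comment above.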
REG_INFER(DivGrad, PrimType_DivGrad, ArithmeticGradInferShape)
REG_INFER(MulGrad, PrimType_MulGrad, ArithmeticGradInferShape)